@livestore/webmesh 0.3.0-dev.5 → 0.3.0-dev.51

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. package/README.md +43 -0
  2. package/dist/.tsbuildinfo +1 -1
  3. package/dist/channel/direct-channel-internal.d.ts +26 -0
  4. package/dist/channel/direct-channel-internal.d.ts.map +1 -0
  5. package/dist/channel/direct-channel-internal.js +217 -0
  6. package/dist/channel/direct-channel-internal.js.map +1 -0
  7. package/dist/channel/direct-channel.d.ts +22 -0
  8. package/dist/channel/direct-channel.d.ts.map +1 -0
  9. package/dist/channel/direct-channel.js +153 -0
  10. package/dist/channel/direct-channel.js.map +1 -0
  11. package/dist/channel/proxy-channel.d.ts +3 -3
  12. package/dist/channel/proxy-channel.d.ts.map +1 -1
  13. package/dist/channel/proxy-channel.js +119 -37
  14. package/dist/channel/proxy-channel.js.map +1 -1
  15. package/dist/common.d.ts +47 -19
  16. package/dist/common.d.ts.map +1 -1
  17. package/dist/common.js +13 -5
  18. package/dist/common.js.map +1 -1
  19. package/dist/mesh-schema.d.ts +79 -13
  20. package/dist/mesh-schema.d.ts.map +1 -1
  21. package/dist/mesh-schema.js +59 -10
  22. package/dist/mesh-schema.js.map +1 -1
  23. package/dist/mod.d.ts +2 -2
  24. package/dist/mod.d.ts.map +1 -1
  25. package/dist/mod.js +2 -2
  26. package/dist/mod.js.map +1 -1
  27. package/dist/node.d.ts +56 -23
  28. package/dist/node.d.ts.map +1 -1
  29. package/dist/node.js +323 -115
  30. package/dist/node.js.map +1 -1
  31. package/dist/node.test.d.ts +1 -1
  32. package/dist/node.test.d.ts.map +1 -1
  33. package/dist/node.test.js +489 -157
  34. package/dist/node.test.js.map +1 -1
  35. package/dist/utils.d.ts +4 -4
  36. package/dist/utils.d.ts.map +1 -1
  37. package/dist/utils.js +7 -1
  38. package/dist/utils.js.map +1 -1
  39. package/dist/websocket-edge.d.ts +56 -0
  40. package/dist/websocket-edge.d.ts.map +1 -0
  41. package/dist/websocket-edge.js +93 -0
  42. package/dist/websocket-edge.js.map +1 -0
  43. package/package.json +10 -6
  44. package/src/channel/direct-channel-internal.ts +356 -0
  45. package/src/channel/direct-channel.ts +234 -0
  46. package/src/channel/proxy-channel.ts +344 -234
  47. package/src/common.ts +24 -17
  48. package/src/mesh-schema.ts +73 -20
  49. package/src/mod.ts +2 -2
  50. package/src/node.test.ts +723 -190
  51. package/src/node.ts +482 -156
  52. package/src/utils.ts +13 -2
  53. package/src/websocket-edge.ts +191 -0
  54. package/dist/channel/message-channel.d.ts +0 -20
  55. package/dist/channel/message-channel.d.ts.map +0 -1
  56. package/dist/channel/message-channel.js +0 -183
  57. package/dist/channel/message-channel.js.map +0 -1
  58. package/dist/websocket-connection.d.ts +0 -51
  59. package/dist/websocket-connection.d.ts.map +0 -1
  60. package/dist/websocket-connection.js +0 -74
  61. package/dist/websocket-connection.js.map +0 -1
  62. package/dist/websocket-server.d.ts +0 -7
  63. package/dist/websocket-server.d.ts.map +0 -1
  64. package/dist/websocket-server.js +0 -24
  65. package/dist/websocket-server.js.map +0 -1
  66. package/src/channel/message-channel.ts +0 -354
  67. package/src/websocket-connection.ts +0 -158
  68. package/src/websocket-server.ts +0 -40
  69. package/tsconfig.json +0 -11
package/src/channel/direct-channel-internal.ts (new file)
@@ -0,0 +1,356 @@
+ import { casesHandled, shouldNeverHappen } from '@livestore/utils'
+ import type { PubSub } from '@livestore/utils/effect'
+ import {
+   Deferred,
+   Effect,
+   Exit,
+   OtelTracer,
+   Predicate,
+   Queue,
+   Schema,
+   Scope,
+   Stream,
+   WebChannel,
+ } from '@livestore/utils/effect'
+
+ import { type ChannelName, type MeshNodeName, type MessageQueueItem, packetAsOtelAttributes } from '../common.js'
+ import * as MeshSchema from '../mesh-schema.js'
+
+ export interface MakeDirectChannelArgs {
+   nodeName: MeshNodeName
+   /** Queue of incoming messages for this channel */
+   incomingPacketsQueue: Queue.Queue<MessageQueueItem>
+   newEdgeAvailablePubSub: PubSub.PubSub<MeshNodeName>
+   channelName: ChannelName
+   target: MeshNodeName
+   sendPacket: (packet: typeof MeshSchema.DirectChannelPacket.Type) => Effect.Effect<void>
+   checkTransferableEdges: (
+     packet: typeof MeshSchema.DirectChannelPacket.Type,
+   ) => typeof MeshSchema.DirectChannelResponseNoTransferables.Type | undefined
+   schema: WebChannel.OutputSchema<any, any, any, any>
+ }
+
+ const makeDeferredResult = Deferred.make<
+   WebChannel.WebChannel<any, any>,
+   typeof MeshSchema.DirectChannelResponseNoTransferables.Type
+ >
+
+ /**
+  * The channel version is important here, as a channel will only be established once both sides have the same version.
+  * The version is used to avoid concurrency issues where both sides have different incompatible message ports.
+  */
+ export const makeDirectChannelInternal = ({
+   nodeName,
+   incomingPacketsQueue,
+   target,
+   checkTransferableEdges,
+   channelName,
+   schema: schema_,
+   sendPacket,
+   channelVersion,
+   scope,
+   sourceId,
+ }: MakeDirectChannelArgs & {
+   channelVersion: number
+   /** We're passing in the closeable scope from the wrapping direct channel */
+   scope: Scope.CloseableScope
+   sourceId: string
+ }): Effect.Effect<
+   WebChannel.WebChannel<any, any>,
+   typeof MeshSchema.DirectChannelResponseNoTransferables.Type,
+   Scope.Scope
+ > =>
+   Effect.gen(function* () {
+     // yield* Effect.addFinalizer((exit) =>
+     //   Effect.spanEvent(`shutdown:${exit._tag === 'Success' ? 'Success' : Cause.pretty(exit.cause)}`),
+     // )
+
+     type ChannelState =
+       | {
+           _tag: 'Initial'
+         }
+       | {
+           _tag: 'RequestSent'
+           reqPacketId: string
+         }
+       | {
+           _tag: 'winner:ResponseSent'
+           channel: WebChannel.WebChannel<any, any>
+           otherSourceId: string
+         }
+       | {
+           _tag: 'loser:WaitingForResponse'
+           otherSourceId: string
+         }
+       | {
+           _tag: 'Established'
+           otherSourceId: string
+         }
+
+     const deferred = yield* makeDeferredResult()
+
+     const span = yield* OtelTracer.currentOtelSpan.pipe(Effect.catchAll(() => Effect.succeed(undefined)))
+     // const span = {
+     //   addEvent: (...msg: any[]) => console.log(`${nodeName}→${channelName}→${target}[${channelVersion}]`, ...msg),
+     // }
+
+     const schema = {
+       send: Schema.Union(schema_.send, MeshSchema.DirectChannelPing, MeshSchema.DirectChannelPong),
+       listen: Schema.Union(schema_.listen, MeshSchema.DirectChannelPing, MeshSchema.DirectChannelPong),
+     }
+
+     const channelStateRef: { current: ChannelState } = {
+       current: { _tag: 'Initial' },
+     }
+
+     const processMessagePacket = ({ packet, respondToSender }: MessageQueueItem) =>
+       Effect.gen(function* () {
+         const channelState = channelStateRef.current
+
+         span?.addEvent(`process:${packet._tag}`, {
+           channelState: channelState._tag,
+           packetId: packet.id,
+           packetReqId: packet.reqId,
+           packetChannelVersion: Predicate.hasProperty('channelVersion')(packet) ? packet.channelVersion : undefined,
+         })
+
+         // const reqIdStr =
+         //   Predicate.hasProperty('reqId')(packet) && packet.reqId !== undefined ? ` for ${packet.reqId}` : ''
+         // yield* Effect.log(
+         //   `${nodeName}→${channelName}→${target}[${channelVersion}]:process packet ${packet._tag} [${packet.id}${reqIdStr}], channel state: ${channelState._tag}`,
+         // )
+
+         if (channelState._tag === 'Initial') return shouldNeverHappen()
+
+         if (packet._tag === 'DirectChannelResponseNoTransferables') {
+           yield* Deferred.fail(deferred, packet)
+           return 'close'
+         }
+
+         // If the other side has a higher version, we need to close this channel and
+         // recreate it with the new version
+         if (packet.channelVersion > channelVersion) {
+           span?.addEvent(`incoming packet has higher version (${packet.channelVersion}), closing channel`)
+           yield* Scope.close(scope, Exit.succeed('higher-version-expected'))
+           // TODO include expected version in the error so the channel gets recreated with the new version
+           return 'close'
+         }
+
+         // If this channel has a higher version, we need to signal the other side to close
+         // and recreate the channel with the new version
+         if (packet.channelVersion < channelVersion) {
+           const newPacket = MeshSchema.DirectChannelRequest.make({
+             source: nodeName,
+             sourceId,
+             target,
+             channelName,
+             channelVersion,
+             hops: [],
+             remainingHops: packet.hops,
+             reqId: undefined,
+           })
+           span?.addEvent(
+             `incoming packet has lower version (${packet.channelVersion}), sending request to reconnect (${newPacket.id})`,
+           )
+
+           yield* sendPacket(newPacket)
+
+           return
+         }
+
+         if (channelState._tag === 'Established' && packet._tag === 'DirectChannelRequest') {
+           if (packet.sourceId === channelState.otherSourceId) {
+             return
+           } else {
+             // In case the instance of the source has changed, we need to close the channel
+             // and reconnect with a new channel
+             span?.addEvent(`force-new-channel`)
+             yield* Scope.close(scope, Exit.succeed('force-new-channel'))
+             return 'close'
+           }
+         }
+
+         switch (packet._tag) {
+           // Assumption: Each side has sent an initial request and another request as a response for an incoming request
+           case 'DirectChannelRequest': {
+             if (channelState._tag !== 'RequestSent') {
+               // We can safely ignore further incoming requests as we're already creating a channel
+               return
+             }
+
+             if (packet.reqId === channelState.reqPacketId) {
+               // Circuit-breaker: We've already sent a request so we don't need to send another one
+             } else {
+               const newRequestPacket = MeshSchema.DirectChannelRequest.make({
+                 source: nodeName,
+                 sourceId,
+                 target,
+                 channelName,
+                 channelVersion,
+                 hops: [],
+                 remainingHops: packet.hops,
+                 reqId: packet.id,
+               })
+               span?.addEvent(`Re-sending new request (${newRequestPacket.id}) for incoming request (${packet.id})`)
+
+               yield* sendPacket(newRequestPacket)
+             }
+
+             const isWinner = nodeName > target
+
+             if (isWinner) {
+               span?.addEvent(`winner side: creating direct channel and sending response`)
+               const mc = new MessageChannel()
+
+               // We're using a direct channel with acks here to make sure messages are not lost
+               // which might happen during re-edge scenarios.
+               // Also we need to eagerly start listening since we're using the channel "ourselves"
+               // for the initial ping-pong sequence.
+               const channel = yield* WebChannel.messagePortChannelWithAck({
+                 port: mc.port1,
+                 schema,
+                 debugId: channelVersion,
+               }).pipe(Effect.andThen(WebChannel.toOpenChannel))
+
+               yield* respondToSender(
+                 MeshSchema.DirectChannelResponseSuccess.make({
+                   reqId: packet.id,
+                   target,
+                   source: nodeName,
+                   channelName: packet.channelName,
+                   hops: [],
+                   remainingHops: packet.hops.slice(0, -1),
+                   port: mc.port2,
+                   channelVersion,
+                 }),
+               )
+
+               channelStateRef.current = { _tag: 'winner:ResponseSent', channel, otherSourceId: packet.sourceId }
+
+               // span?.addEvent(`winner side: waiting for ping`)
+
+               // Now we wait for the other side to respond via the channel
+               yield* channel.listen.pipe(
+                 Stream.flatten(),
+                 Stream.filter(Schema.is(MeshSchema.DirectChannelPing)),
+                 Stream.take(1),
+                 Stream.runDrain,
+               )
+
+               // span?.addEvent(`winner side: sending pong`)
+
+               yield* channel.send(MeshSchema.DirectChannelPong.make({}))
+
+               span?.addEvent(`winner side: established`)
+               channelStateRef.current = { _tag: 'Established', otherSourceId: packet.sourceId }
+
+               yield* Deferred.succeed(deferred, channel)
+             } else {
+               span?.addEvent(`loser side: waiting for response`)
+               // Wait for `DirectChannelResponseSuccess` packet
+               channelStateRef.current = { _tag: 'loser:WaitingForResponse', otherSourceId: packet.sourceId }
+             }
+
+             break
+           }
+           case 'DirectChannelResponseSuccess': {
+             if (channelState._tag !== 'loser:WaitingForResponse') {
+               return shouldNeverHappen(
+                 `Expected to find direct channel response from ${target}, but was in ${channelState._tag} state`,
+               )
+             }
+
+             // See direct-channel notes above
+             const channel = yield* WebChannel.messagePortChannelWithAck({
+               port: packet.port,
+               schema,
+               debugId: channelVersion,
+             }).pipe(Effect.andThen(WebChannel.toOpenChannel))
+
+             const waitForPongFiber = yield* channel.listen.pipe(
+               Stream.flatten(),
+               Stream.filter(Schema.is(MeshSchema.DirectChannelPong)),
+               Stream.take(1),
+               Stream.runDrain,
+               Effect.fork,
+             )
+
+             // span?.addEvent(`loser side: sending ping`)
+
+             // There seems to be some scenario where the initial ping message is lost.
+             // As a workaround until we find the root cause, we're retrying the ping a few times.
+             // TODO write a test that reproduces this issue and fix the root cause ()
+             // https://github.com/livestorejs/livestore/issues/262
+             yield* channel
+               .send(MeshSchema.DirectChannelPing.make({}))
+               .pipe(Effect.timeout(10), Effect.retry({ times: 2 }))
+
+             // span?.addEvent(`loser side: waiting for pong`)
+
+             yield* waitForPongFiber
+
+             span?.addEvent(`loser side: established`)
+             channelStateRef.current = { _tag: 'Established', otherSourceId: channelState.otherSourceId }
+
+             yield* Deferred.succeed(deferred, channel)
+
+             return
+           }
+           default: {
+             return casesHandled(packet)
+           }
+         }
+       }).pipe(
+         Effect.withSpan(`handleMessagePacket:${packet._tag}:${packet.source}→${packet.target}`, {
+           attributes: packetAsOtelAttributes(packet),
+         }),
+       )
+
+     yield* Effect.gen(function* () {
+       while (true) {
+         const packet = yield* Queue.take(incomingPacketsQueue)
+         const res = yield* processMessagePacket(packet)
+         // We want to give requests another chance to be processed
+         if (res === 'close') {
+           return
+         }
+       }
+     }).pipe(Effect.interruptible, Effect.tapCauseLogPretty, Effect.forkScoped)
+
+     const channelState = channelStateRef.current
+
+     if (channelState._tag !== 'Initial') {
+       return shouldNeverHappen(`Expected channel to be in Initial state, but was in ${channelState._tag} state`)
+     }
+
+     const edgeRequest = Effect.gen(function* () {
+       const packet = MeshSchema.DirectChannelRequest.make({
+         source: nodeName,
+         sourceId,
+         target,
+         channelName,
+         channelVersion,
+         hops: [],
+         reqId: undefined,
+       })
+
+       channelStateRef.current = { _tag: 'RequestSent', reqPacketId: packet.id }
+
+       // yield* Effect.log(`${nodeName}→${channelName}→${target}:edgeRequest [${channelVersion}]`)
+
+       const noTransferableResponse = checkTransferableEdges(packet)
+       if (noTransferableResponse !== undefined) {
+         yield* Effect.spanEvent(`No transferable edges found for ${packet.source}→${packet.target}`)
+         return yield* Effect.fail(noTransferableResponse)
+       }
+
+       yield* sendPacket(packet)
+       span?.addEvent(`initial edge request sent (${packet.id})`)
+     })
+
+     yield* edgeRequest
+
+     const channel = yield* deferred
+
+     return channel
+   }).pipe(Effect.withSpanScoped(`makeDirectChannel:${channelVersion}`))
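The handshake in this new file boils down to two rules: reconcile channel versions first, then apply a deterministic tie-break so exactly one side creates the MessageChannel. A condensed restatement as a sketch (the type and helper names below are illustrative, not part of the package):

// Illustrative only: restates the decision logic of direct-channel-internal.ts above.
type VersionDecision =
  | 'close-and-recreate' // the peer is ahead: close this channel so it can be recreated with the newer version
  | 'ask-peer-to-reconnect' // we are ahead: send a fresh DirectChannelRequest so the peer catches up
  | 'same-version' // versions match: proceed with the request/response handshake

const compareChannelVersions = (incoming: number, local: number): VersionDecision =>
  incoming > local ? 'close-and-recreate' : incoming < local ? 'ask-peer-to-reconnect' : 'same-version'

// With equal versions the tie is broken deterministically: the node whose name sorts higher
// "wins", creates the MessageChannel, and ships port2 back in the DirectChannelResponseSuccess
// packet; the other side waits for that response and then confirms the port with a ping/pong
// round trip before both sides consider the channel established.
const isWinner = (nodeName: string, target: string): boolean => nodeName > target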
package/src/channel/direct-channel.ts (new file)
@@ -0,0 +1,234 @@
+ import {
+   Cause,
+   Deferred,
+   Effect,
+   Either,
+   Exit,
+   Option,
+   Queue,
+   Schema,
+   Scope,
+   Stream,
+   TQueue,
+   WebChannel,
+ } from '@livestore/utils/effect'
+ import { nanoid } from '@livestore/utils/nanoid'
+
+ import * as WebmeshSchema from '../mesh-schema.js'
+ import type { MakeDirectChannelArgs } from './direct-channel-internal.js'
+ import { makeDirectChannelInternal } from './direct-channel-internal.js'
+
+ /**
+  * Behaviour:
+  * - Waits until there is an initial edge
+  * - Automatically reconnects on disconnect
+  *
+  * Implementation notes:
+  * - We've split up the functionality into a wrapper channel and an internal channel.
+  * - The wrapper channel is responsible for:
+  *   - Forwarding send/listen messages to the internal channel (via a queue)
+  *   - Establishing the initial channel and reconnecting on disconnect
+  *   - Listening for new edges as a hint to reconnect if not already connected
+  * - The wrapper channel maintains an edge counter which is used as the channel version
+  *
+  * If needed we can also implement further functionality (like heartbeat) in this wrapper channel.
+  */
+ export const makeDirectChannel = ({
+   schema,
+   newEdgeAvailablePubSub,
+   channelName,
+   checkTransferableEdges,
+   nodeName,
+   incomingPacketsQueue,
+   target,
+   sendPacket,
+ }: MakeDirectChannelArgs) =>
+   Effect.scopeWithCloseable((scope) =>
+     Effect.gen(function* () {
+       /** Only used to identify whether a source is the same instance to know when to reconnect */
+       const sourceId = nanoid()
+
+       const listenQueue = yield* Queue.unbounded<any>()
+       const sendQueue = yield* TQueue.unbounded<[msg: any, deferred: Deferred.Deferred<void>]>()
+
+       const initialEdgeDeferred = yield* Deferred.make<void>()
+
+       const debugInfo = {
+         pendingSends: 0,
+         totalSends: 0,
+         connectCounter: 0,
+         isConnected: false,
+         innerChannelRef: { current: undefined as WebChannel.WebChannel<any, any> | undefined },
+       }
+
+       // #region reconnect-loop
+       yield* Effect.gen(function* () {
+         const resultDeferred = yield* Deferred.make<{
+           channel: WebChannel.WebChannel<any, any>
+           channelVersion: number
+           makeDirectChannelScope: Scope.CloseableScope
+         }>()
+
+         while (true) {
+           debugInfo.connectCounter++
+           const channelVersion = debugInfo.connectCounter
+
+           yield* Effect.spanEvent(`Connecting#${channelVersion}`)
+
+           const makeDirectChannelScope = yield* Scope.make()
+           // Attach the new scope to the parent scope
+           yield* Effect.addFinalizer((ex) => Scope.close(makeDirectChannelScope, ex))
+
+           /**
+            * Expected concurrency behaviour:
+            * - We're concurrently running the edge setup and the waitForNewEdgeFiber
+            * - Happy path:
+            *   - The edge setup succeeds and we can interrupt the waitForNewEdgeFiber
+            * - Tricky paths:
+            *   - While an edge is still being set up, we want to re-try when there is a new edge
+            *   - If the edge setup returns a `DirectChannelResponseNoTransferables` error,
+            *     we want to wait for a new edge and then re-try
+            * - Further notes:
+            *   - If the parent scope closes, we want to also interrupt both the edge setup and the waitForNewEdgeFiber
+            *   - We're creating a separate scope for each edge attempt, which
+            *     - we'll use to fork the message channel in, which allows us to interrupt it later
+            *   - We need to make sure that "interruption" isn't "bubbling out"
+            */
+           const waitForNewEdgeFiber = yield* Stream.fromPubSub(newEdgeAvailablePubSub).pipe(
+             Stream.tap((edgeName) => Effect.spanEvent(`new-conn:${edgeName}`)),
+             Stream.take(1),
+             Stream.runDrain,
+             Effect.as('new-edge' as const),
+             Effect.fork,
+           )
+
+           const makeChannel = makeDirectChannelInternal({
+             nodeName,
+             sourceId,
+             incomingPacketsQueue,
+             target,
+             checkTransferableEdges,
+             channelName,
+             schema,
+             channelVersion,
+             newEdgeAvailablePubSub,
+             sendPacket,
+             scope: makeDirectChannelScope,
+           }).pipe(
+             Scope.extend(makeDirectChannelScope),
+             Effect.forkIn(makeDirectChannelScope),
+             // Given we only call `Effect.exit` later when joining the fiber,
+             // we don't want Effect to produce an "unhandled error" log message
+             Effect.withUnhandledErrorLogLevel(Option.none()),
+           )
+
+           const raceResult = yield* Effect.raceFirst(makeChannel, waitForNewEdgeFiber.pipe(Effect.disconnect))
+
+           if (raceResult === 'new-edge') {
+             yield* Scope.close(makeDirectChannelScope, Exit.fail('new-edge'))
+             // We'll try again
+           } else {
+             const channelExit = yield* raceResult.pipe(Effect.exit)
+             if (channelExit._tag === 'Failure') {
+               yield* Scope.close(makeDirectChannelScope, channelExit)
+
+               if (
+                 Cause.isFailType(channelExit.cause) &&
+                 Schema.is(WebmeshSchema.DirectChannelResponseNoTransferables)(channelExit.cause.error)
+               ) {
+                 // Only retry when there is a new edge available
+                 yield* waitForNewEdgeFiber.pipe(Effect.exit)
+               }
+             } else {
+               const channel = channelExit.value
+
+               yield* Deferred.succeed(resultDeferred, { channel, makeDirectChannelScope, channelVersion })
+               break
+             }
+           }
+         }
+
+         // Now we wait until the first channel is established
+         const { channel, makeDirectChannelScope, channelVersion } = yield* resultDeferred
+
+         yield* Effect.spanEvent(`Connected#${channelVersion}`)
+         debugInfo.isConnected = true
+         debugInfo.innerChannelRef.current = channel
+
+         yield* Deferred.succeed(initialEdgeDeferred, void 0)
+
+         // We'll now forward all incoming messages to the listen queue
+         yield* channel.listen.pipe(
+           Stream.flatten(),
+           // Stream.tap((msg) => Effect.log(`${target}→${channelName}→${nodeName}:message:${msg.message}`)),
+           Stream.tapChunk((chunk) => Queue.offerAll(listenQueue, chunk)),
+           Stream.runDrain,
+           Effect.tapCauseLogPretty,
+           Effect.forkIn(makeDirectChannelScope),
+         )
+
+         yield* Effect.gen(function* () {
+           while (true) {
+             const [msg, deferred] = yield* TQueue.peek(sendQueue)
+             // NOTE we don't need an explicit retry flow here since in case of the channel being closed,
+             // the send will never succeed. Meanwhile the send-loop fiber will be interrupted and
+             // given we only peeked at the queue, the message to send is still there.
+             yield* channel.send(msg)
+             yield* Deferred.succeed(deferred, void 0)
+             yield* TQueue.take(sendQueue) // Remove the message from the queue
+           }
+         }).pipe(Effect.forkIn(makeDirectChannelScope))
+
+         // Wait until the channel is closed and then try to reconnect
+         yield* channel.closedDeferred
+
+         yield* Scope.close(makeDirectChannelScope, Exit.succeed('channel-closed'))
+
+         yield* Effect.spanEvent(`Disconnected#${channelVersion}`)
+         debugInfo.isConnected = false
+         debugInfo.innerChannelRef.current = undefined
+       }).pipe(
+         Effect.scoped, // Additionally scoping here to clean up finalizers after each loop run
+         Effect.forever,
+         Effect.tapCauseLogPretty,
+         Effect.forkScoped,
+       )
+       // #endregion reconnect-loop
+
+       const parentSpan = yield* Effect.currentSpan.pipe(Effect.orDie)
+
+       const send = (message: any) =>
+         Effect.gen(function* () {
+           const sentDeferred = yield* Deferred.make<void>()
+
+           debugInfo.pendingSends++
+           debugInfo.totalSends++
+
+           yield* TQueue.offer(sendQueue, [message, sentDeferred])
+
+           yield* sentDeferred
+
+           debugInfo.pendingSends--
+         }).pipe(Effect.scoped, Effect.withParentSpan(parentSpan))
+
+       const listen = Stream.fromQueue(listenQueue, { maxChunkSize: 1 }).pipe(Stream.map(Either.right))
+
+       const closedDeferred = yield* Deferred.make<void>().pipe(Effect.acquireRelease(Deferred.done(Exit.void)))
+
+       const webChannel = {
+         [WebChannel.WebChannelSymbol]: WebChannel.WebChannelSymbol,
+         send,
+         listen,
+         closedDeferred,
+         supportsTransferables: true,
+         schema,
+         debugInfo,
+         shutdown: Scope.close(scope, Exit.succeed('shutdown')),
+       } satisfies WebChannel.WebChannel<any, any>
+
+       return {
+         webChannel: webChannel as WebChannel.WebChannel<any, any>,
+         initialEdgeDeferred,
+       }
+     }),
+   )
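The send loop in this wrapper only removes a message from sendQueue after the inner channel has accepted it (peek, send, acknowledge, then take), which is what lets queued sends survive a reconnect. A minimal sketch of that pattern in plain TypeScript (illustrative only, not the package's Effect-based implementation; all names below are made up):

// Sketch of the "peek → send → ack → dequeue" pattern the reconnect loop relies on.
type Pending<T> = { msg: T; resolve: () => void }

class SendQueue<T> {
  private items: Pending<T>[] = []

  /** Enqueue a message; the returned promise resolves once the message was actually sent. */
  offer(msg: T): Promise<void> {
    return new Promise((resolve) => this.items.push({ msg, resolve }))
  }

  /** Drain the queue over a (possibly short-lived) connection. */
  async drain(send: (msg: T) => Promise<void>): Promise<void> {
    while (this.items.length > 0) {
      const head = this.items[0]! // peek only: the message stays queued until the send succeeds
      await send(head.msg)        // throws if the connection drops mid-send
      head.resolve()              // acknowledge to the original caller
      this.items.shift()          // dequeue only after a successful send
    }
  }
}

// If `drain` fails because the channel closed, the head message is still in `items`,
// so the next `drain` call (after reconnecting) retries it.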