@livestore/utils 0.0.55-dev.2 → 0.0.55-dev.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/.tsbuildinfo.json +1 -1
- package/dist/effect/Effect.d.ts +2 -0
- package/dist/effect/Effect.d.ts.map +1 -1
- package/dist/effect/Effect.js +2 -0
- package/dist/effect/Effect.js.map +1 -1
- package/dist/effect/SubscriptionRef.d.ts +2 -2
- package/dist/effect/SubscriptionRef.d.ts.map +1 -1
- package/dist/effect/SubscriptionRef.js +2 -3
- package/dist/effect/SubscriptionRef.js.map +1 -1
- package/dist/effect/index.d.ts +2 -6
- package/dist/effect/index.d.ts.map +1 -1
- package/dist/effect/index.js +2 -8
- package/dist/effect/index.js.map +1 -1
- package/package.json +13 -13
- package/src/effect/Effect.ts +8 -0
- package/src/effect/SubscriptionRef.ts +3 -11
- package/src/effect/index.ts +2 -8
- package/dist/effect/Browser.d.ts +0 -18
- package/dist/effect/Browser.d.ts.map +0 -1
- package/dist/effect/Browser.js +0 -22
- package/dist/effect/Browser.js.map +0 -1
- package/src/effect/browser-worker-tmp/BrowserWorker.ts +0 -26
- package/src/effect/browser-worker-tmp/BrowserWorkerRunner.ts +0 -14
- package/src/effect/browser-worker-tmp/internal/worker.ts +0 -71
- package/src/effect/browser-worker-tmp/internal/workerRunner.ts +0 -119
- package/src/effect/browser-worker-tmp/port-platform-runner.ts +0 -74
- package/src/effect/worker-tmp/Worker.ts +0 -374
- package/src/effect/worker-tmp/WorkerError.ts +0 -79
- package/src/effect/worker-tmp/WorkerRunner.ts +0 -181
- package/src/effect/worker-tmp/internal/worker.ts +0 -417
- package/src/effect/worker-tmp/internal/workerError.ts +0 -6
- package/src/effect/worker-tmp/internal/workerRunner.ts +0 -237
package/src/effect/worker-tmp/internal/worker.ts
@@ -1,417 +0,0 @@
-/* eslint-disable prefer-arrow/prefer-arrow-functions */
-import { Transferable } from '@effect/platform'
-import * as Schema from '@effect/schema/Schema'
-import * as Serializable from '@effect/schema/Serializable'
-import * as Arr from 'effect/Array'
-import * as Cause from 'effect/Cause'
-import * as Channel from 'effect/Channel'
-import * as Chunk from 'effect/Chunk'
-import * as Context from 'effect/Context'
-import * as Deferred from 'effect/Deferred'
-import * as Effect from 'effect/Effect'
-import * as Exit from 'effect/Exit'
-import * as Fiber from 'effect/Fiber'
-import { identity, pipe } from 'effect/Function'
-import * as Layer from 'effect/Layer'
-import * as Option from 'effect/Option'
-import * as Pool from 'effect/Pool'
-import * as Queue from 'effect/Queue'
-import * as Schedule from 'effect/Schedule'
-import type * as Scope from 'effect/Scope'
-import * as Stream from 'effect/Stream'
-import * as Tracer from 'effect/Tracer'
-
-import type * as Worker from '../Worker.js'
-import { WorkerError } from '../WorkerError.js'
-
-/** @internal */
-export const defaultQueue = <I>() =>
-  Effect.map(
-    Queue.unbounded<readonly [id: number, item: I, span: Option.Option<Tracer.Span>]>(),
-    (queue): Worker.WorkerQueue<I> => ({
-      offer: (id, item, span) => Queue.offer(queue, [id, item, span]),
-      take: Queue.take(queue),
-      shutdown: Queue.shutdown(queue),
-    }),
-  )
-
-/** @internal */
-export const PlatformWorkerTypeId: Worker.PlatformWorkerTypeId = Symbol.for(
-  '@effect/platform/Worker/PlatformWorker',
-) as Worker.PlatformWorkerTypeId
-
-/** @internal */
-export const PlatformWorker = Context.GenericTag<Worker.PlatformWorker>('@effect/platform/Worker/PlatformWorker')
-
-/** @internal */
-export const WorkerManagerTypeId: Worker.WorkerManagerTypeId = Symbol.for(
-  '@effect/platform/Worker/WorkerManager',
-) as Worker.WorkerManagerTypeId
-
-/** @internal */
-export const WorkerManager = Context.GenericTag<Worker.WorkerManager>('@effect/platform/Worker/WorkerManager')
-
-/** @internal */
-export const Spawner = Context.GenericTag<Worker.Spawner, Worker.SpawnerFn>('@effect/platform/Worker/Spawner')
-
-/** @internal */
-export const makeManager = Effect.gen(function* () {
-  const platform = yield* PlatformWorker
-  let idCounter = 0
-  return WorkerManager.of({
-    [WorkerManagerTypeId]: WorkerManagerTypeId,
-    spawn<I, O, E>({ encode, initialMessage, queue, transfers = (_) => [] }: Worker.Worker.Options<I>) {
-      return Effect.gen(function* (_) {
-        const spawn = yield* _(Spawner)
-        const id = idCounter++
-        let requestIdCounter = 0
-        const requestMap = new Map<
-          number,
-          readonly [Queue.Queue<Exit.Exit<ReadonlyArray<O>, E | WorkerError>>, Deferred.Deferred<void>]
-        >()
-        const sendQueue = yield* Effect.acquireRelease(
-          Queue.unbounded<readonly [message: Worker.Worker.Request, transfers?: ReadonlyArray<unknown>]>(),
-          Queue.shutdown,
-        )
-
-        const collector = Transferable.unsafeMakeCollector()
-        const wrappedEncode = encode
-          ? (message: I) =>
-              Effect.zipRight(
-                collector.clear,
-                Effect.provideService(encode(message), Transferable.Collector, collector),
-              )
-          : Effect.succeed
-
-        const outbound = queue ?? (yield* defaultQueue<I>())
-        yield* Effect.addFinalizer(() => outbound.shutdown)
-
-        yield* Effect.gen(function* () {
-          const readyLatch = yield* Deferred.make<void>()
-          const backing = yield* platform.spawn<Worker.Worker.Request, Worker.Worker.Response<E, O>>(spawn(id))
-          const send = pipe(
-            sendQueue.take,
-            Effect.flatMap(([message, transfers]) => backing.send(message, transfers)),
-            Effect.forever,
-          )
-          const take = pipe(
-            Queue.take(backing.queue),
-            Effect.flatMap((msg) => {
-              if (msg[0] === 0) {
-                return Deferred.complete(readyLatch, Effect.void)
-              }
-              return handleMessage(msg[1])
-            }),
-            Effect.forever,
-          )
-          return yield* Effect.all(
-            [Fiber.join(backing.fiber), Effect.zipRight(Deferred.await(readyLatch), send), take],
-            { concurrency: 'unbounded' },
-          )
-        }).pipe(
-          Effect.scoped,
-          Effect.onError((cause) =>
-            Effect.forEach(requestMap.values(), ([queue]) => Queue.offer(queue, Exit.failCause(cause))),
-          ),
-          Effect.retry(Schedule.spaced(1000)),
-          Effect.annotateLogs({
-            package: '@effect/platform',
-            module: 'Worker',
-          }),
-          Effect.interruptible,
-          Effect.forkScoped,
-        )
-
-        yield* Effect.addFinalizer(() =>
-          Effect.zipRight(
-            Effect.forEach(requestMap.values(), ([queue]) => Queue.offer(queue, Exit.failCause(Cause.empty)), {
-              discard: true,
-            }),
-            Effect.sync(() => requestMap.clear()),
-          ),
-        )
-
-        const handleMessage = (response: Worker.Worker.Response<E, O>) =>
-          Effect.suspend(() => {
-            const queue = requestMap.get(response[0])
-            if (!queue) return Effect.void
-
-            switch (response[1]) {
-              // data
-              case 0: {
-                return Queue.offer(queue[0], Exit.succeed(response[2]))
-              }
-              // end
-              case 1: {
-                return response.length === 2
-                  ? Queue.offer(queue[0], Exit.failCause(Cause.empty))
-                  : Effect.zipRight(
-                      Queue.offer(queue[0], Exit.succeed(response[2])),
-                      Queue.offer(queue[0], Exit.failCause(Cause.empty)),
-                    )
-              }
-              // error / defect
-              case 2:
-              case 3: {
-                return Queue.offer(
-                  queue[0],
-                  response[1] === 2
-                    ? Exit.fail(response[2])
-                    : Exit.failCause(WorkerError.decodeCause(response[2] as any)),
-                )
-              }
-            }
-          })
-
-        const executeAcquire = (request: I) =>
-          Effect.tap(
-            Effect.all([
-              Effect.sync(() => requestIdCounter++),
-              Queue.unbounded<Exit.Exit<ReadonlyArray<O>, E | WorkerError>>(),
-              Deferred.make<void>(),
-              Effect.map(
-                Effect.serviceOption(Tracer.ParentSpan),
-                Option.filter((span): span is Tracer.Span => span._tag === 'Span'),
-              ),
-            ]),
-            ([id, queue, deferred, span]) =>
-              Effect.suspend(() => {
-                requestMap.set(id, [queue, deferred])
-                return outbound.offer(id, request, span)
-              }),
-          )
-
-        const executeRelease = (
-          [id, , deferred]: [
-            number,
-            Queue.Queue<Exit.Exit<ReadonlyArray<O>, E | WorkerError>>,
-            Deferred.Deferred<void>,
-            Option.Option<Tracer.Span>,
-          ],
-          exit: Exit.Exit<unknown, unknown>,
-        ) => {
-          const release = Effect.zipRight(
-            Deferred.complete(deferred, Effect.void),
-            Effect.sync(() => requestMap.delete(id)),
-          )
-          return Exit.isFailure(exit) ? Effect.zipRight(sendQueue.offer([[id, 1]]), release) : release
-        }
-
-        const execute = (request: I) =>
-          Stream.flatMap(Stream.acquireRelease(executeAcquire(request), executeRelease), ([, queue]) => {
-            const loop: Channel.Channel<
-              Chunk.Chunk<O>,
-              unknown,
-              E | WorkerError,
-              unknown,
-              void,
-              unknown
-            > = Channel.flatMap(
-              Queue.take(queue),
-              Exit.match({
-                onFailure: (cause) => (Cause.isEmpty(cause) ? Channel.void : Channel.failCause(cause)),
-                onSuccess: (value) => Channel.flatMap(Channel.write(Chunk.unsafeFromArray(value)), () => loop),
-              }),
-            )
-            return Stream.fromChannel(loop)
-          })
-
-        const executeEffect = (request: I) =>
-          Effect.acquireUseRelease(
-            executeAcquire(request),
-            ([, queue]) => Effect.flatMap(Queue.take(queue), Exit.map(Arr.unsafeGet(0))),
-            executeRelease,
-          )
-
-        yield* outbound.take.pipe(
-          Effect.flatMap(([id, request, span]) =>
-            Effect.fork(
-              Effect.suspend(() => {
-                const result = requestMap.get(id)
-                if (!result) return Effect.void
-                const transferables = transfers(request)
-                const spanTuple = Option.getOrUndefined(
-                  Option.map(span, (span) => [span.traceId, span.spanId, span.sampled] as const),
-                )
-                return pipe(
-                  Effect.flatMap(wrappedEncode(request), (payload) =>
-                    sendQueue.offer([
-                      [id, 0, payload, spanTuple],
-                      [...transferables, ...collector.unsafeRead()],
-                    ]),
-                  ),
-                  Effect.catchAllCause((cause) => Queue.offer(result[0], Exit.failCause(cause))),
-                  Effect.zipRight(Deferred.await(result[1])),
-                )
-              }),
-            ),
-          ),
-          Effect.forever,
-          Effect.forkScoped,
-          Effect.interruptible,
-        )
-
-        if (initialMessage) {
-          yield* Effect.sync(initialMessage).pipe(
-            Effect.flatMap(executeEffect),
-            Effect.mapError((error) => new WorkerError({ reason: 'spawn', error })),
-          )
-        }
-
-        return { id, execute, executeEffect }
-      }).pipe(Effect.parallelFinalizers)
-    },
-  })
-})
-
-/** @internal */
-export const layerManager = Layer.effect(WorkerManager, makeManager)
-
-/** @internal */
-export const makePool = <I, O, E>(options: Worker.WorkerPool.Options<I>) =>
-  Effect.gen(function* () {
-    const manager = yield* WorkerManager
-    const workers = new Set<Worker.Worker<I, O, E>>()
-    const acquire = pipe(
-      manager.spawn<I, O, E>(options),
-      Effect.tap((worker) => Effect.sync(() => workers.add(worker))),
-      Effect.tap((worker) => Effect.addFinalizer(() => Effect.sync(() => workers.delete(worker)))),
-      options.onCreate ? Effect.tap(options.onCreate) : identity,
-    )
-    const backing =
-      'minSize' in options
-        ? yield* Pool.makeWithTTL({
-            acquire,
-            min: options.minSize,
-            max: options.maxSize,
-            concurrency: options.concurrency,
-            targetUtilization: options.targetUtilization,
-            timeToLive: options.timeToLive,
-          })
-        : yield* Pool.make({
-            acquire,
-            size: options.size,
-            concurrency: options.concurrency,
-            targetUtilization: options.targetUtilization,
-          })
-    const pool: Worker.WorkerPool<I, O, E> = {
-      backing,
-      broadcast: (message: I) =>
-        Effect.forEach(workers, (worker) => worker.executeEffect(message), {
-          concurrency: 'unbounded',
-          discard: true,
-        }),
-      execute: (message: I) => Stream.unwrapScoped(Effect.map(backing.get, (worker) => worker.execute(message))),
-      executeEffect: (message: I) =>
-        Effect.scoped(Effect.flatMap(backing.get, (worker) => worker.executeEffect(message))),
-    }
-
-    // report any spawn errors
-    yield* Effect.scoped(backing.get)
-
-    return pool
-  })
-
-/** @internal */
-export const makePoolLayer = <Tag, I, O, E>(
-  tag: Context.Tag<Tag, Worker.WorkerPool<I, O, E>>,
-  options: Worker.WorkerPool.Options<I>,
-) => Layer.scoped(tag, makePool(options))
-
-/** @internal */
-export const makeSerialized = <I extends Schema.TaggedRequest.All>(
-  options: Worker.SerializedWorker.Options<I>,
-): Effect.Effect<Worker.SerializedWorker<I>, WorkerError, Worker.WorkerManager | Worker.Spawner | Scope.Scope> =>
-  Effect.gen(function* () {
-    const manager = yield* WorkerManager
-    const backing = yield* manager.spawn({
-      ...(options as any),
-      encode(message) {
-        return Effect.mapError(
-          Serializable.serialize(message as any),
-          (error) => new WorkerError({ reason: 'encode', error }),
-        )
-      },
-    })
-    const execute = <Req extends I>(message: Req) => {
-      const parseSuccess = Schema.decode(Serializable.successSchema(message as any))
-      const parseFailure = Schema.decode(Serializable.failureSchema(message as any))
-      return pipe(
-        backing.execute(message),
-        Stream.catchAll((error) => Effect.flatMap(parseFailure(error), Effect.fail)),
-        Stream.mapEffect(parseSuccess),
-      )
-    }
-    const executeEffect = <Req extends I>(message: Req) => {
-      const parseSuccess = Schema.decode(Serializable.successSchema(message as any))
-      const parseFailure = Schema.decode(Serializable.failureSchema(message as any))
-      return Effect.matchEffect(backing.executeEffect(message), {
-        onFailure: (error) => Effect.flatMap(parseFailure(error), Effect.fail),
-        onSuccess: parseSuccess,
-      })
-    }
-    return identity<Worker.SerializedWorker<I>>({
-      id: backing.id,
-      execute: execute as any,
-      executeEffect: executeEffect as any,
-    })
-  })
-
-/** @internal */
-export const makePoolSerialized = <I extends Schema.TaggedRequest.All>(
-  options: Worker.SerializedWorkerPool.Options<I>,
-) =>
-  Effect.gen(function* () {
-    const manager = yield* WorkerManager
-    const workers = new Set<Worker.SerializedWorker<I>>()
-    const acquire = pipe(
-      makeSerialized<I>(options),
-      Effect.tap((worker) => Effect.sync(() => workers.add(worker))),
-      Effect.tap((worker) => Effect.addFinalizer(() => Effect.sync(() => workers.delete(worker)))),
-      options.onCreate
-        ? Effect.tap(options.onCreate as (worker: Worker.SerializedWorker<I>) => Effect.Effect<void, WorkerError>)
-        : identity,
-      Effect.provideService(WorkerManager, manager),
-    )
-    const backing = yield* 'timeToLive' in options
-      ? Pool.makeWithTTL({
-          acquire,
-          min: options.minSize,
-          max: options.maxSize,
-          concurrency: options.concurrency,
-          targetUtilization: options.targetUtilization,
-          timeToLive: options.timeToLive,
-        })
-      : Pool.make({
-          acquire,
-          size: options.size,
-          concurrency: options.concurrency,
-          targetUtilization: options.targetUtilization,
-        })
-    const pool: Worker.SerializedWorkerPool<I> = {
-      backing,
-      broadcast: <Req extends I>(message: Req) =>
-        Effect.forEach(workers, (worker) => worker.executeEffect(message), {
-          concurrency: 'unbounded',
-          discard: true,
-        }) as any,
-      execute: <Req extends I>(message: Req) =>
-        Stream.unwrapScoped(Effect.map(backing.get, (worker) => worker.execute(message))) as any,
-      executeEffect: <Req extends I>(message: Req) =>
-        Effect.scoped(Effect.flatMap(backing.get, (worker) => worker.executeEffect(message))) as any,
-    }
-
-    // report any spawn errors
-    yield* Effect.scoped(backing.get)
-
-    return pool
-  })
-
-/** @internal */
-export const makePoolSerializedLayer = <Tag, I extends Schema.TaggedRequest.All>(
-  tag: Context.Tag<Tag, Worker.SerializedWorkerPool<I>>,
-  options: Worker.SerializedWorkerPool.Options<I>,
-) => Layer.scoped(tag, makePoolSerialized(options))
-
-/** @internal */
-export const layerSpawner = <W = unknown>(spawner: Worker.SpawnerFn<W>) => Layer.succeed(Spawner, spawner)
package/src/effect/worker-tmp/internal/workerRunner.ts
@@ -1,237 +0,0 @@
-/* eslint-disable prefer-arrow/prefer-arrow-functions */
-import { Transferable } from '@effect/platform'
-import * as Schema from '@effect/schema/Schema'
-import * as Serializable from '@effect/schema/Serializable'
-import * as Cause from 'effect/Cause'
-import * as Chunk from 'effect/Chunk'
-import * as Context from 'effect/Context'
-import * as Effect from 'effect/Effect'
-import * as Either from 'effect/Either'
-import * as ExecutionStrategy from 'effect/ExecutionStrategy'
-import * as Exit from 'effect/Exit'
-import * as Fiber from 'effect/Fiber'
-import { identity, pipe } from 'effect/Function'
-import * as Layer from 'effect/Layer'
-import * as Option from 'effect/Option'
-import * as Queue from 'effect/Queue'
-import * as Scope from 'effect/Scope'
-import * as Stream from 'effect/Stream'
-
-import type * as Worker from '../Worker.js'
-import { isWorkerError, WorkerError } from '../WorkerError.js'
-import type * as WorkerRunner from '../WorkerRunner.js'
-
-/** @internal */
-export const PlatformRunnerTypeId: WorkerRunner.PlatformRunnerTypeId = Symbol.for(
-  '@effect/platform/Runner/PlatformRunner',
-) as WorkerRunner.PlatformRunnerTypeId
-
-/** @internal */
-export const PlatformRunner = Context.GenericTag<WorkerRunner.PlatformRunner>('@effect/platform/Runner/PlatformRunner')
-
-/** @internal */
-export const make = <I, E, R, O>(
-  process: (request: I) => Stream.Stream<O, E, R> | Effect.Effect<O, E, R>,
-  options?: WorkerRunner.Runner.Options<I, O, E>,
-) =>
-  Effect.gen(function* (_) {
-    const scope = yield* _(Scope.fork(yield* _(Effect.scope), ExecutionStrategy.parallel))
-    const fiber = Option.getOrThrow(Fiber.getCurrentFiber())
-    const shutdown = Effect.zipRight(Scope.close(scope, Exit.void), Fiber.interruptFork(fiber))
-    const platform = yield* _(PlatformRunner)
-    const backing = yield* _(
-      platform.start<Worker.Worker.Request<I>, Worker.Worker.Response<E>>(shutdown),
-      Scope.extend(scope),
-    )
-    const fiberMap = new Map<number, Fiber.Fiber<void, unknown>>()
-
-    yield* _(
-      Queue.take(backing.queue),
-      options?.decode
-        ? Effect.flatMap((msg): Effect.Effect<readonly [portId: number, Worker.Worker.Request<I>], WorkerError> => {
-            const req = msg[1]
-            if (req[1] === 1) {
-              return Effect.succeed(msg)
-            }
-
-            return Effect.map(options.decode!(req[2]), (data) => [msg[0], [req[0], req[1], data, req[3]]])
-          })
-        : identity,
-      Effect.tap(([portId, req]) => {
-        const id = req[0]
-        if (req[1] === 1) {
-          const fiber = fiberMap.get(id)
-          if (!fiber) return Effect.void
-          return Fiber.interrupt(fiber)
-        }
-
-        const collector = Transferable.unsafeMakeCollector()
-        return pipe(
-          Effect.sync(() => process(req[2])),
-          Effect.flatMap((stream) => {
-            let effect = Effect.isEffect(stream)
-              ? Effect.flatMap(stream, (data) => {
-                  const transfers = options?.transfers ? options.transfers(data) : []
-                  return pipe(
-                    options?.encodeOutput
-                      ? Effect.provideService(options.encodeOutput(req[2], data), Transferable.Collector, collector)
-                      : Effect.succeed(data),
-                    Effect.flatMap((payload) =>
-                      backing.send(portId, [id, 0, [payload]], [...transfers, ...collector.unsafeRead()]),
-                    ),
-                  )
-                })
-              : pipe(
-                  stream,
-                  Stream.chunks,
-                  Stream.tap((data) => {
-                    if (options?.encodeOutput === undefined) {
-                      const payload = Chunk.toReadonlyArray(data)
-                      const transfers = options?.transfers ? payload.flatMap(options.transfers) : undefined
-                      return backing.send(portId, [id, 0, payload], transfers)
-                    }
-
-                    const transfers: unknown[] = []
-                    collector.unsafeClear()
-                    return pipe(
-                      Effect.forEach(data, (data) => {
-                        if (options?.transfers) {
-                          for (const option of options.transfers(data)) {
-                            transfers.push(option)
-                          }
-                        }
-                        return Effect.orDie(options.encodeOutput!(req[2], data))
-                      }),
-                      Effect.provideService(Transferable.Collector, collector),
-                      Effect.flatMap((payload) => {
-                        collector.unsafeRead().forEach((transfer) => transfers.push(transfer))
-                        return backing.send(portId, [id, 0, payload], transfers)
-                      }),
-                    )
-                  }),
-                  Stream.runDrain,
-                  Effect.andThen(backing.send(portId, [id, 1])),
-                )
-
-            if (req[3]) {
-              const [traceId, spanId, sampled] = req[3]
-              effect = Effect.withParentSpan(effect, {
-                _tag: 'ExternalSpan',
-                traceId,
-                spanId,
-                sampled,
-                context: Context.empty(),
-              })
-            }
-
-            return effect
-          }),
-          Effect.catchIf(isWorkerError, (error) =>
-            backing.send(portId, [id, 3, WorkerError.encodeCause(Cause.fail(error))]),
-          ),
-          Effect.onExit((exit) => {
-            if (exit._tag === 'Success') {
-              return Effect.void
-            }
-            return Either.match(Cause.failureOrCause(exit.cause), {
-              onLeft: (error) => {
-                const transfers = options?.transfers ? options.transfers(error) : []
-                collector.unsafeClear()
-                return pipe(
-                  options?.encodeError
-                    ? Effect.provideService(options.encodeError(req[2], error), Transferable.Collector, collector)
-                    : Effect.succeed(error),
-                  Effect.flatMap((payload) =>
-                    backing.send(portId, [id, 2, payload as any], [...transfers, ...collector.unsafeRead()]),
-                  ),
-                  Effect.catchAllCause((cause) => backing.send(portId, [id, 3, WorkerError.encodeCause(cause)])),
-                )
-              },
-              onRight: (cause) => backing.send(portId, [id, 3, WorkerError.encodeCause(cause)]),
-            })
-          }),
-          Effect.ensuring(Effect.sync(() => fiberMap.delete(id))),
-          Effect.fork,
-          Effect.tap((fiber) => Effect.sync(() => fiberMap.set(id, fiber))),
-        )
-      }),
-      Effect.forever,
-      Effect.forkIn(scope),
-    )
-  })
-
-/** @internal */
-export const layer = <I, E, R, O>(
-  process: (request: I) => Stream.Stream<O, E, R> | Effect.Effect<O, E, R>,
-  options?: WorkerRunner.Runner.Options<I, O, E>,
-): Layer.Layer<never, WorkerError, WorkerRunner.PlatformRunner | R> => Layer.scopedDiscard(make(process, options))
-
-/** @internal */
-export const makeSerialized = <
-  R,
-  I,
-  A extends Schema.TaggedRequest.All,
-  const Handlers extends WorkerRunner.SerializedRunner.Handlers<A>,
->(
-  schema: Schema.Schema<A, I, R>,
-  handlers: Handlers,
-): Effect.Effect<
-  void,
-  WorkerError,
-  R | WorkerRunner.PlatformRunner | Scope.Scope | WorkerRunner.SerializedRunner.HandlersContext<Handlers>
-> =>
-  Effect.gen(function* (_) {
-    const scope = yield* _(Effect.scope)
-    let context = Context.empty() as Context.Context<any>
-    const parseRequest = Schema.decodeUnknown(schema) as (_: unknown) => Effect.Effect<A>
-
-    return yield* _(
-      make(
-        (request: A) => {
-          const result = (handlers as any)[request._tag](request)
-          if (Layer.isLayer(result)) {
-            return Effect.flatMap(Layer.buildWithScope(result, scope), (_) =>
-              Effect.sync(() => {
-                context = Context.merge(context, _)
-              }),
-            )
-          } else if (Effect.isEffect(result)) {
-            return Effect.provide(result, context)
-          }
-          return Stream.provideContext(result as any, context)
-        },
-        {
-          decode(message) {
-            return Effect.mapError(parseRequest(message), (error) => new WorkerError({ reason: 'decode', error }))
-          },
-          encodeError(request, message) {
-            return Effect.mapError(
-              Serializable.serializeFailure(request as any, message),
-              (error) => new WorkerError({ reason: 'encode', error }),
-            )
-          },
-          encodeOutput(request, message) {
-            return Effect.catchAllCause(
-              Serializable.serializeSuccess(request as any, message),
-              (error) => new WorkerError({ reason: 'encode', error }),
-            )
-          },
-        },
-      ),
-    )
-  }) as any
-
-/** @internal */
-export const layerSerialized = <
-  R,
-  I,
-  A extends Schema.TaggedRequest.All,
-  const Handlers extends WorkerRunner.SerializedRunner.Handlers<A>,
->(
-  schema: Schema.Schema<A, I, R>,
-  handlers: Handlers,
-): Layer.Layer<
-  never,
-  WorkerError,
-  R | WorkerRunner.PlatformRunner | WorkerRunner.SerializedRunner.HandlersContext<Handlers>
-> => Layer.scopedDiscard(makeSerialized(schema, handlers))