@livestore/utils 0.4.0-dev.3 → 0.4.0-dev.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/.tsbuildinfo.json +1 -1
- package/dist/NoopTracer.d.ts.map +1 -1
- package/dist/NoopTracer.js +1 -0
- package/dist/NoopTracer.js.map +1 -1
- package/dist/effect/Effect.d.ts +1 -2
- package/dist/effect/Effect.d.ts.map +1 -1
- package/dist/effect/Effect.js +2 -2
- package/dist/effect/Effect.js.map +1 -1
- package/dist/effect/Error.d.ts +1 -1
- package/dist/effect/Error.js.map +1 -1
- package/dist/effect/Logger.d.ts +4 -1
- package/dist/effect/Logger.d.ts.map +1 -1
- package/dist/effect/Logger.js +12 -3
- package/dist/effect/Logger.js.map +1 -1
- package/dist/effect/OtelTracer.d.ts +5 -0
- package/dist/effect/OtelTracer.d.ts.map +1 -0
- package/dist/effect/OtelTracer.js +8 -0
- package/dist/effect/OtelTracer.js.map +1 -0
- package/dist/effect/RpcClient.d.ts +32 -0
- package/dist/effect/RpcClient.d.ts.map +1 -0
- package/dist/effect/RpcClient.js +142 -0
- package/dist/effect/RpcClient.js.map +1 -0
- package/dist/effect/Schema/index.d.ts +0 -1
- package/dist/effect/Schema/index.d.ts.map +1 -1
- package/dist/effect/Schema/index.js +0 -1
- package/dist/effect/Schema/index.js.map +1 -1
- package/dist/effect/Stream.d.ts +73 -2
- package/dist/effect/Stream.d.ts.map +1 -1
- package/dist/effect/Stream.js +68 -1
- package/dist/effect/Stream.js.map +1 -1
- package/dist/effect/Stream.test.d.ts +2 -0
- package/dist/effect/Stream.test.d.ts.map +1 -0
- package/dist/effect/Stream.test.js +84 -0
- package/dist/effect/Stream.test.js.map +1 -0
- package/dist/effect/SubscriptionRef.d.ts +2 -2
- package/dist/effect/SubscriptionRef.d.ts.map +1 -1
- package/dist/effect/SubscriptionRef.js +6 -1
- package/dist/effect/SubscriptionRef.js.map +1 -1
- package/dist/effect/WebSocket.js +1 -1
- package/dist/effect/WebSocket.js.map +1 -1
- package/dist/effect/index.d.ts +9 -5
- package/dist/effect/index.d.ts.map +1 -1
- package/dist/effect/index.js +10 -7
- package/dist/effect/index.js.map +1 -1
- package/dist/global.d.ts +1 -0
- package/dist/global.d.ts.map +1 -1
- package/dist/global.js.map +1 -1
- package/dist/mod.d.ts +2 -0
- package/dist/mod.d.ts.map +1 -1
- package/dist/mod.js +4 -0
- package/dist/mod.js.map +1 -1
- package/dist/node/ChildProcessRunner/ChildProcessRunner.d.ts.map +1 -1
- package/dist/node/ChildProcessRunner/ChildProcessRunner.js +54 -10
- package/dist/node/ChildProcessRunner/ChildProcessRunner.js.map +1 -1
- package/dist/node/ChildProcessRunner/ChildProcessRunnerTest/ChildProcessRunner.test.js +182 -3
- package/dist/node/ChildProcessRunner/ChildProcessRunnerTest/ChildProcessRunner.test.js.map +1 -1
- package/dist/node/ChildProcessRunner/ChildProcessRunnerTest/schema.d.ts +12 -3
- package/dist/node/ChildProcessRunner/ChildProcessRunnerTest/schema.d.ts.map +1 -1
- package/dist/node/ChildProcessRunner/ChildProcessRunnerTest/schema.js +7 -1
- package/dist/node/ChildProcessRunner/ChildProcessRunnerTest/schema.js.map +1 -1
- package/dist/node/ChildProcessRunner/ChildProcessRunnerTest/serializedWorker.js +11 -1
- package/dist/node/ChildProcessRunner/ChildProcessRunnerTest/serializedWorker.js.map +1 -1
- package/dist/node/ChildProcessRunner/ChildProcessWorker.d.ts +16 -0
- package/dist/node/ChildProcessRunner/ChildProcessWorker.d.ts.map +1 -1
- package/dist/node/ChildProcessRunner/ChildProcessWorker.js +98 -2
- package/dist/node/ChildProcessRunner/ChildProcessWorker.js.map +1 -1
- package/dist/node/mod.d.ts +1 -1
- package/dist/node/mod.d.ts.map +1 -1
- package/dist/node/mod.js +2 -2
- package/dist/node/mod.js.map +1 -1
- package/package.json +31 -32
- package/src/NoopTracer.ts +1 -0
- package/src/effect/Effect.ts +15 -3
- package/src/effect/Error.ts +1 -1
- package/src/effect/Logger.ts +14 -4
- package/src/effect/OtelTracer.ts +11 -0
- package/src/effect/RpcClient.ts +205 -0
- package/src/effect/Schema/index.ts +0 -1
- package/src/effect/Stream.test.ts +127 -0
- package/src/effect/Stream.ts +111 -2
- package/src/effect/SubscriptionRef.ts +14 -2
- package/src/effect/WebSocket.ts +1 -1
- package/src/effect/index.ts +16 -5
- package/src/global.ts +1 -0
- package/src/mod.ts +9 -0
- package/src/node/ChildProcessRunner/ChildProcessRunner.ts +59 -10
- package/src/node/ChildProcessRunner/ChildProcessRunnerTest/ChildProcessRunner.test.ts +253 -3
- package/src/node/ChildProcessRunner/ChildProcessRunnerTest/schema.ts +14 -1
- package/src/node/ChildProcessRunner/ChildProcessRunnerTest/serializedWorker.ts +14 -1
- package/src/node/ChildProcessRunner/ChildProcessWorker.ts +111 -3
- package/src/node/mod.ts +3 -3
- package/src/effect/Schema/msgpack.ts +0 -8
package/src/effect/SubscriptionRef.ts CHANGED
@@ -1,5 +1,4 @@
-import
-import { Chunk, Effect, pipe, Stream } from 'effect'
+import { Chunk, Effect, pipe, Stream, SubscriptionRef } from 'effect'
 import { dual } from 'effect/Function'
 import type { Predicate, Refinement } from 'effect/Predicate'
 
@@ -20,3 +19,16 @@ export const waitUntil: {
 } = dual(2, <A>(sref: SubscriptionRef.SubscriptionRef<A>, predicate: (a: A) => boolean) =>
   pipe(sref.changes, Stream.filter(predicate), Stream.take(1), Stream.runCollect, Effect.map(Chunk.unsafeHead)),
 )
+
+export const fromStream = <A>(stream: Stream.Stream<A>, initialValue: A) =>
+  Effect.gen(function* () {
+    const sref = yield* SubscriptionRef.make(initialValue)
+
+    yield* stream.pipe(
+      Stream.tap((a) => SubscriptionRef.set(sref, a)),
+      Stream.runDrain,
+      Effect.forkScoped,
+    )
+
+    return sref
+  })
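The new `fromStream` helper above forks the stream-draining fiber into the surrounding scope, so callers need a `Scope` in context. A minimal usage sketch follows; the import path and the surrounding wiring are assumptions for illustration, not taken from this package:

```ts
import { Effect, Stream, SubscriptionRef } from 'effect'
// Hypothetical import path; the diff shows the module file, not how it is published.
import * as LSSubscriptionRef from '@livestore/utils/effect/SubscriptionRef'

const latest = Effect.scoped(
  Effect.gen(function* () {
    // Mirror the stream's latest element into a SubscriptionRef, starting at 0
    const sref = yield* LSSubscriptionRef.fromStream(Stream.make(1, 2, 3), 0)
    // Give the forked draining fiber a moment to run before reading
    yield* Effect.sleep('10 millis')
    return yield* SubscriptionRef.get(sref)
  }),
)
```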
package/src/effect/WebSocket.ts CHANGED
package/src/effect/index.ts CHANGED
@@ -1,6 +1,7 @@
 import '../global.ts'
 
-export
+export { AiError, AiLanguageModel, AiModel, AiTool, AiToolkit, McpSchema, McpServer } from '@effect/ai'
+export * as Otlp from '@effect/opentelemetry/Otlp'
 export {
   Command,
   CommandExecutor,
@@ -8,6 +9,11 @@ export {
   FetchHttpClient,
   FileSystem,
   Headers,
+  HttpApi,
+  HttpApiClient,
+  HttpApiEndpoint,
+  HttpApiGroup,
+  HttpApp,
   HttpClient,
   HttpClientError,
   HttpClientRequest,
@@ -18,6 +24,7 @@ export {
   HttpServerRequest,
   HttpServerResponse,
   KeyValueStore,
+  MsgPack,
   Socket,
   Terminal,
   Transferable,
@@ -29,7 +36,8 @@ export {
 export { BrowserWorker, BrowserWorkerRunner } from '@effect/platform-browser'
 export {
   Rpc,
-  RpcClient,
+  // RpcClient, // TODO bring back "original" RpcClient from effect/rpc
+  RpcClientError,
   RpcGroup,
   RpcMessage,
   RpcMiddleware,
@@ -47,8 +55,8 @@ export {
   Cause,
   Channel,
   Chunk,
-  // Logger,
   Config,
+  ConfigError,
   Console,
   Context,
   Data,
@@ -58,6 +66,7 @@ export {
   Equal,
   ExecutionStrategy,
   Exit,
+  FastCheck,
   Fiber,
   FiberHandle,
   FiberId,
@@ -107,13 +116,15 @@ export {
   Tracer,
   Types,
 } from 'effect'
-export {
+export type { NonEmptyArray } from 'effect/Array'
+export { constVoid, dual } from 'effect/Function'
 export { TreeFormatter } from 'effect/ParseResult'
 export type { Serializable, SerializableWithResult } from 'effect/Schema'
-
 export * as SchemaAST from 'effect/SchemaAST'
 export * as BucketQueue from './BucketQueue.ts'
 export * as Logger from './Logger.ts'
+export * as OtelTracer from './OtelTracer.ts'
+export * as RpcClient from './RpcClient.ts'
 export * as Schema from './Schema/index.ts'
 export * as Stream from './Stream.ts'
 export * as Subscribable from './Subscribable.ts'
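One behavioural note on the barrel change above: `RpcClient` from `@effect/rpc` is commented out (with a TODO to bring it back) and the name is instead re-exported from the local `./RpcClient.ts` wrapper, so downstream imports of `RpcClient` now resolve to that wrapper. A consumer-side sketch, assuming the barrel is published as `@livestore/utils/effect` (the subpath mapping is not shown in this diff):

```ts
// `RpcClient` here is the package's own wrapper (./RpcClient.ts), not effect/rpc's client;
// the other names are re-exports added in this release.
import { FastCheck, MsgPack, OtelTracer, RpcClient, RpcClientError } from '@livestore/utils/effect'
```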
package/src/global.ts CHANGED
package/src/mod.ts CHANGED
@@ -234,4 +234,13 @@ export const isPromise = (value: any): value is Promise<unknown> => typeof value
 
 export const isIterable = <T>(value: any): value is Iterable<T> => typeof value?.[Symbol.iterator] === 'function'
 
+/** This utility "lies" as a means of compat with libs that don't explicitly type optionals as unioned with `undefined`. */
+export const omitUndefineds = <T extends Record<keyof any, unknown>>(
+  rec: T,
+): {
+  [K in keyof T]: Exclude<T[K], undefined>
+} => {
+  return rec as never
+}
+
 export { objectToString as errorToString } from './misc.ts'
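The new `omitUndefineds` helper is a type-level cast only; nothing is stripped at runtime. A sketch of the kind of situation it targets, with an assumed entry point and a hypothetical third-party `configure` function: under `exactOptionalPropertyTypes`, a value typed `number | undefined` cannot be passed where a library declared `retries?: number`, and the cast papers over that.

```ts
import { omitUndefineds } from '@livestore/utils' // assumed entry point for mod.ts

// Stand-in for a third-party API that types its optional without `| undefined`
declare const configure: (options: { retries?: number }) => void

const raw: { retries?: number | undefined } = { retries: undefined }

// Without the helper this call is rejected when `exactOptionalPropertyTypes` is enabled;
// `omitUndefineds` changes only the type (the `undefined` value is still there at runtime).
configure(omitUndefineds(raw))
```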
package/src/node/ChildProcessRunner/ChildProcessRunner.ts CHANGED
@@ -13,6 +13,43 @@ import * as Layer from 'effect/Layer'
 import * as Runtime from 'effect/Runtime'
 import * as Scope from 'effect/Scope'
 
+// Parent death monitoring setup
+let parentDeathDetectionEnabled = false
+
+const setupParentDeathMonitoring = (parentPid: number) => {
+  if (parentDeathDetectionEnabled) return
+  parentDeathDetectionEnabled = true
+
+  let consecutiveFailures = 0
+  const maxFailures = 3 // Require 3 consecutive failures before self-terminating
+
+  // Check if parent is still alive every 2 seconds (more conservative)
+  const checkParentAlive = () => {
+    try {
+      // Send signal 0 to check if process exists (doesn't actually send signal)
+      process.kill(parentPid, 0)
+      // If we reach here, parent is still alive, reset failure counter and check again later
+      consecutiveFailures = 0
+      setTimeout(checkParentAlive, 2000)
+    } catch {
+      consecutiveFailures++
+      console.warn(`[Worker ${process.pid}] Parent check failed (${consecutiveFailures}/${maxFailures})`)
+
+      if (consecutiveFailures >= maxFailures) {
+        // Parent process has been gone for multiple checks, self-terminate
+        console.error(`[Worker ${process.pid}] Parent process ${parentPid} confirmed dead, self-terminating`)
+        process.exit(0)
+      } else {
+        // Try again sooner on failure
+        setTimeout(checkParentAlive, 1000)
+      }
+    }
+  }
+
+  // Start monitoring after a longer initial delay to let things settle
+  setTimeout(checkParentAlive, 5000)
+}
+
 const platformRunnerImpl = Runner.PlatformRunner.of({
   [Runner.PlatformRunnerTypeId]: Runner.PlatformRunnerTypeId,
   start<I, O>(closeLatch: typeof CloseLatch.Service) {
@@ -43,18 +80,30 @@ const platformRunnerImpl = Runner.PlatformRunner.of({
          Deferred.unsafeDone(closeLatch, Exit.die(exit.cause))
        }
      }
-      port.on('message', (message: Runner.BackingRunner.Message<I>) => {
+      port.on('message', (message: Runner.BackingRunner.Message<I> | any) => {
        // console.log('message', message)
-
-
-
-
-
-
+
+        // Handle parent death detection setup messages
+        if (Array.isArray(message) && message[0] === 'setup-parent-death-detection' && message[1]?.parentPid) {
+          const parentPid = message[1].parentPid
+          // console.log(`[Worker ${process.pid}] Setting up parent death detection for parent ${parentPid}`)
+          setupParentDeathMonitoring(parentPid)
+          return
+        }
+
+        // Handle normal Effect worker messages
+        if (Array.isArray(message) && typeof message[0] === 'number') {
+          if (message[0] === 0) {
+            const result = handler(0, message[1])
+            if (Effect.isEffect(result)) {
+              const fiber = runFork(result)
+              fiber.addObserver(onExit)
+              FiberSet.unsafeAdd(fiberSet, fiber)
+            }
+          } else {
+            Deferred.unsafeDone(closeLatch, Exit.void)
+            port.close()
          }
-        } else {
-          Deferred.unsafeDone(closeLatch, Exit.void)
-          port.close()
        }
      })
      port.on('messageerror', (cause) => {
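For orientation, the parent-death detection added here is the child-process half of a two-sided handshake: the parent side (`ChildProcessWorker.ts`, further down in this diff) sends `['setup-parent-death-detection', { parentPid: process.pid }]` over the IPC channel, and the runner above then polls that PID with signal `0`, self-terminating after three consecutive failed probes. A condensed sketch of the probe mechanism itself (plain Node.js semantics, not code taken from this package):

```ts
// Signal 0 delivers no signal; it only checks that the target process exists.
// process.kill throws ESRCH once the parent PID is gone.
const isParentAlive = (parentPid: number): boolean => {
  try {
    process.kill(parentPid, 0)
    return true
  } catch {
    return false
  }
}
```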
package/src/node/ChildProcessRunner/ChildProcessRunnerTest/ChildProcessRunner.test.ts CHANGED
@@ -1,13 +1,12 @@
-// import * as WorkerThreads from 'node:worker_threads'
 import * as ChildProcess from 'node:child_process'
 
 import * as EffectWorker from '@effect/platform/Worker'
 import { assert, describe, it } from '@effect/vitest'
-import { Chunk, Effect, Stream } from 'effect'
+import { Chunk, Effect, Exit, Fiber, Scope, Stream } from 'effect'
 
 import * as ChildProcessWorker from '../ChildProcessWorker.ts'
 import type { WorkerMessage } from './schema.ts'
-import { GetPersonById, GetUserById, InitialMessage, Person, User } from './schema.ts'
+import { GetPersonById, GetUserById, InitialMessage, Person, StartStubbornWorker, User } from './schema.ts'
 
 const WorkerLive = ChildProcessWorker.layer(() =>
   ChildProcess.fork(
@@ -49,4 +48,255 @@ describe('ChildProcessRunner', { timeout: 10_000 }, () => {
        new Person({ id: 123, name: 'ing', data: new Uint8Array([4, 5, 6]) }),
      ])
    }).pipe(Effect.scoped, Effect.provide(WorkerLive), Effect.runPromise))
+
+  describe('Process Cleanup', { timeout: 15_000 }, () => {
+    const isProcessRunning = (pid: number) => {
+      try {
+        process.kill(pid, 0) // Signal 0 checks if process exists
+        return true
+      } catch {
+        return false
+      }
+    }
+
+    it('should clean up child processes when Effect is interrupted', () =>
+      Effect.gen(function* () {
+        let workerPid: number | undefined
+
+        const testEffect = Effect.gen(function* () {
+          const pool = yield* EffectWorker.makePoolSerialized<WorkerMessage>({
+            size: 1,
+            initialMessage: () => new InitialMessage({ name: 'test', data: new Uint8Array([1, 2, 3]) }),
+          })
+          const result = yield* pool.executeEffect(new StartStubbornWorker({ blockDuration: 30_000 }))
+          workerPid = result.pid
+
+          // Verify the worker process is running
+          assert.strictEqual(isProcessRunning(workerPid), true, 'Worker process should be running')
+
+          // Start a long-running operation that we'll interrupt
+          yield* Effect.sleep('60 seconds')
+        }).pipe(Effect.scoped, Effect.provide(WorkerLive))
+
+        // Run the test effect but interrupt it after 2 seconds
+        const fiber = yield* Effect.fork(testEffect)
+        yield* Effect.sleep('2 seconds')
+        yield* Fiber.interrupt(fiber)
+
+        // Wait a moment for cleanup to complete
+        yield* Effect.sleep('1 second')
+
+        // Verify the child process was cleaned up
+        if (workerPid) {
+          assert.strictEqual(
+            isProcessRunning(workerPid),
+            false,
+            `Worker process ${workerPid} should be terminated after Effect interruption`,
+          )
+        } else {
+          assert.fail('Worker PID was not captured')
+        }
+      }).pipe(Effect.runPromise))
+
+    it('should clean up child processes when scope is closed abruptly', () =>
+      Effect.gen(function* () {
+        let workerPid: number | undefined
+
+        // Create a scope that we can close manually
+        const scope = yield* Scope.make()
+
+        try {
+          const pool = yield* EffectWorker.makePoolSerialized<WorkerMessage>({
+            size: 1,
+            initialMessage: () => new InitialMessage({ name: 'test', data: new Uint8Array([1, 2, 3]) }),
+          }).pipe(Scope.extend(scope), Effect.provide(WorkerLive))
+
+          const result = yield* pool.executeEffect(new StartStubbornWorker({ blockDuration: 30_000 }))
+          workerPid = result.pid
+
+          // Verify the worker is running
+          assert.strictEqual(isProcessRunning(workerPid!), true, 'Worker process should be running')
+        } finally {
+          // Abruptly close the scope (simulating test abortion)
+          yield* Scope.close(scope, Exit.void)
+        }
+
+        // Wait for cleanup
+        yield* Effect.sleep('1 second')
+
+        // This should pass but will initially fail due to zombie process issue
+        if (workerPid) {
+          assert.strictEqual(
+            isProcessRunning(workerPid),
+            false,
+            `Worker process ${workerPid} should be terminated after scope closure`,
+          )
+        } else {
+          assert.fail('Worker PID was not captured')
+        }
+      }).pipe(Effect.runPromise))
+
+    it('should clean up child processes when parent receives SIGINT', () =>
+      Effect.gen(function* () {
+        let workerPid: number | undefined
+
+        const pool = yield* EffectWorker.makePoolSerialized<WorkerMessage>({
+          size: 1,
+          initialMessage: () => new InitialMessage({ name: 'test', data: new Uint8Array([1, 2, 3]) }),
+        })
+
+        const result = yield* pool.executeEffect(new StartStubbornWorker({ blockDuration: 60_000 }))
+        workerPid = result.pid
+
+        // Verify the worker is running
+        assert.strictEqual(isProcessRunning(workerPid), true, 'Worker process should be running')
+
+        // Simulate SIGINT being sent to current process (like Ctrl+C in vitest)
+        // This should trigger cleanup of child processes
+        yield* Effect.async<void>((resume) => {
+          // Store current listeners before we manipulate them
+          const currentSIGINTListeners = process.listeners('SIGINT').slice()
+
+          // Set up our test handler
+          const testHandler = () => {
+            // Emit SIGINT to all current listeners to trigger cleanup
+            currentSIGINTListeners.forEach((listener) => {
+              try {
+                ;(listener as Function)()
+              } catch {
+                // Ignore errors
+              }
+            })
+            resume(Effect.void)
+          }
+
+          // Remove all current SIGINT listeners and add our test handler
+          process.removeAllListeners('SIGINT')
+          process.once('SIGINT', testHandler)
+
+          // Send SIGINT after a short delay
+          setTimeout(() => {
+            process.kill(process.pid, 'SIGINT')
+          }, 1000)
+        })
+
+        // Wait for cleanup to complete
+        yield* Effect.sleep('2 seconds')
+
+        // This test should initially fail - child process will still be running
+        if (workerPid) {
+          assert.strictEqual(
+            isProcessRunning(workerPid),
+            false,
+            `Worker process ${workerPid} should be terminated after SIGINT`,
+          )
+        } else {
+          assert.fail('Worker PID was not captured')
+        }
+      }).pipe(Effect.scoped, Effect.provide(WorkerLive), Effect.runPromise))
+
+    it('should clean up multiple concurrent child processes', () =>
+      Effect.gen(function* () {
+        let workerPids: number[] = []
+
+        const testEffect = Effect.gen(function* () {
+          const pool = yield* EffectWorker.makePoolSerialized<WorkerMessage>({
+            size: 3, // Multiple workers
+            initialMessage: () => new InitialMessage({ name: 'test', data: new Uint8Array([1, 2, 3]) }),
+          })
+
+          // Start multiple stubborn workers
+          const workers = yield* Effect.all(
+            [
+              pool.executeEffect(new StartStubbornWorker({ blockDuration: 30_000 })),
+              pool.executeEffect(new StartStubbornWorker({ blockDuration: 30_000 })),
+              pool.executeEffect(new StartStubbornWorker({ blockDuration: 30_000 })),
+            ],
+            { concurrency: 'unbounded' },
+          )
+
+          workerPids = workers.map((w) => w.pid)
+
+          // Verify all workers are running
+          for (const pid of workerPids) {
+            assert.strictEqual(isProcessRunning(pid), true, `Worker process ${pid} should be running`)
+          }
+
+          yield* Effect.sleep('30 seconds') // Keep running until interrupted
+        }).pipe(Effect.scoped, Effect.provide(WorkerLive))
+
+        // Run with timeout to force termination
+        const fiber = yield* Effect.fork(testEffect)
+        yield* Effect.sleep('2 seconds')
+        yield* Fiber.interrupt(fiber)
+
+        // Wait for cleanup
+        yield* Effect.sleep('2 seconds')
+
+        // All worker processes should be cleaned up
+        for (const pid of workerPids) {
+          assert.strictEqual(
+            isProcessRunning(pid),
+            false,
+            `Worker process ${pid} should be terminated after pool cleanup`,
+          )
+        }
+      }).pipe(Effect.runPromise))
+
+    it('should handle direct ChildProcess.fork cleanup (node-sync pattern)', () =>
+      Effect.gen(function* () {
+        let childPid: number | undefined
+
+        // This mimics the exact pattern used in node-sync tests
+        const nodeChildProcess = ChildProcess.fork(
+          new URL(
+            '../../../../dist/node/ChildProcessRunner/ChildProcessRunnerTest/serializedWorker.js',
+            import.meta.url,
+          ),
+          ['test-client'],
+        )
+
+        childPid = nodeChildProcess.pid
+
+        const testEffect = Effect.gen(function* () {
+          const worker = yield* EffectWorker.makePoolSerialized<WorkerMessage>({
+            size: 1,
+            concurrency: 100,
+            initialMessage: () => new InitialMessage({ name: 'test', data: new Uint8Array([1, 2, 3]) }),
+          }).pipe(Effect.provide(ChildProcessWorker.layer(() => nodeChildProcess)))
+
+          // Start stubborn worker
+          yield* worker.executeEffect(new StartStubbornWorker({ blockDuration: 60_000 }))
+
+          // Verify process is running
+          if (childPid) {
+            assert.strictEqual(isProcessRunning(childPid), true, 'Child process should be running')
+          }
+
+          // Keep running until interrupted
+          yield* Effect.sleep('30 seconds')
+        }).pipe(Effect.scoped)
+
+        // Simulate the exact abortion pattern from node-sync
+        const fiber = yield* Effect.fork(testEffect)
+        yield* Effect.sleep('2 seconds')
+
+        // Force kill the fiber without proper cleanup (simulates Ctrl+C)
+        yield* Fiber.interrupt(fiber)
+
+        // Wait for cleanup
+        yield* Effect.sleep('3 seconds')
+
+        // This test should initially fail - demonstrating the zombie process issue
+        if (childPid) {
+          assert.strictEqual(
+            isProcessRunning(childPid),
+            false,
+            `Child process ${childPid} should be terminated after forced interruption`,
+          )
+        } else {
+          assert.fail('Child PID was not captured')
+        }
+      }).pipe(Effect.runPromise))
+  })
 })
package/src/node/ChildProcessRunner/ChildProcessRunnerTest/schema.ts CHANGED
@@ -35,6 +35,12 @@ export class RunnerInterrupt extends Schema.TaggedRequest<RunnerInterrupt>()('Ru
   payload: {},
 }) {}
 
+export class StartStubbornWorker extends Schema.TaggedRequest<StartStubbornWorker>()('StartStubbornWorker', {
+  failure: Schema.Never,
+  success: Schema.Struct({ pid: Schema.Number }),
+  payload: { blockDuration: Schema.Number },
+}) {}
+
 export class InitialMessage extends Schema.TaggedRequest<InitialMessage>()('InitialMessage', {
   failure: Schema.Never,
   success: Schema.Void,
@@ -61,5 +67,12 @@ export class GetSpan extends Schema.TaggedRequest<GetSpan>()('GetSpan', {
   payload: {},
 }) {}
 
-export const WorkerMessage = Schema.Union(
+export const WorkerMessage = Schema.Union(
+  GetUserById,
+  GetPersonById,
+  InitialMessage,
+  GetSpan,
+  RunnerInterrupt,
+  StartStubbornWorker,
+)
 export type WorkerMessage = Schema.Schema.Type<typeof WorkerMessage>
package/src/node/ChildProcessRunner/ChildProcessRunnerTest/serializedWorker.ts CHANGED
@@ -22,7 +22,7 @@ const WorkerLive = Runner.layerSerialized(WorkerMessage, {
   // InitialMessage: (req) => Layer.succeed(Name, req.name),
   InitialMessage: (req) =>
     Effect.gen(function* () {
-      yield* Effect.addFinalizer(() => Effect.log('closing worker scope'))
+      // yield* Effect.addFinalizer(() => Effect.log('closing worker scope'))
       return Layer.succeed(Name, req.name)
     }).pipe(Layer.unwrapScoped),
   // InitialMessage: (req) =>
@@ -47,6 +47,19 @@ const WorkerLive = Runner.layerSerialized(WorkerMessage, {
     }
   }).pipe(Effect.withSpan('GetSpan')),
   RunnerInterrupt: () => Effect.interrupt,
+  StartStubbornWorker: ({ blockDuration }) =>
+    Effect.gen(function* () {
+      // Start a blocking operation that won't respond to normal shutdown signals
+      const pid = process.pid
+      yield* Effect.fork(
+        Effect.gen(function* () {
+          // Block for the specified duration, ignoring shutdown attempts
+          yield* Effect.sleep(`${blockDuration} millis`)
+          yield* Effect.log('Stubborn worker finished blocking')
+        }).pipe(Effect.uninterruptible),
+      )
+      return { pid }
+    }),
 }).pipe(Layer.provide(ChildProcessRunner.layer))
 // }).pipe(Layer.provide(PlatformNode.NodeWorkerRunner.layer))
 
package/src/node/ChildProcessRunner/ChildProcessWorker.ts CHANGED
@@ -7,22 +7,114 @@ import * as Exit from 'effect/Exit'
 import * as Layer from 'effect/Layer'
 import * as Scope from 'effect/Scope'
 
+// Track child processes for cleanup on process signals
+const childProcesses = new Set<ChildProcess.ChildProcess>()
+
+// Force cleanup all tracked child processes
+const forceCleanupChildren = (signal: NodeJS.Signals = 'SIGKILL') => {
+  for (const child of childProcesses) {
+    try {
+      if (!child.killed) {
+        child.kill(signal)
+      }
+    } catch {
+      // Ignore errors during cleanup
+    }
+  }
+  childProcesses.clear()
+}
+
+// Install signal handlers once to clean up all child processes
+let signalHandlersInstalled = false
+
+const installSignalHandlers = () => {
+  if (signalHandlersInstalled) return
+  signalHandlersInstalled = true
+
+  // Use 'beforeExit' instead of signal handlers since tests may interfere with signals
+  process.on('beforeExit', () => {
+    forceCleanupChildren('SIGKILL')
+  })
+
+  // Also try to cleanup on uncaught exceptions
+  process.on('uncaughtException', () => {
+    forceCleanupChildren('SIGKILL')
+  })
+
+  // Install signal handlers but make them more robust
+  const sigintHandler = () => {
+    forceCleanupChildren('SIGINT')
+  }
+
+  const sigtermHandler = () => {
+    forceCleanupChildren('SIGTERM')
+  }
+
+  const exitHandler = () => {
+    forceCleanupChildren('SIGKILL')
+  }
+
+  // Add handlers that will persist even if tests remove/add other handlers
+  process.prependListener('SIGINT', sigintHandler)
+  process.prependListener('SIGTERM', sigtermHandler)
+  process.prependListener('exit', exitHandler)
+}
+
 const platformWorkerImpl = Worker.makePlatform<ChildProcess.ChildProcess>()({
   setup({ scope, worker: childProcess }) {
     return Effect.flatMap(Deferred.make<void, WorkerError>(), (exitDeferred) => {
+      // Install signal handlers for process-wide cleanup
+      installSignalHandlers()
+
+      // Track this child process for cleanup
+      childProcesses.add(childProcess)
+
      childProcess.on('exit', () => {
+        // Remove from tracking when process exits
+        childProcesses.delete(childProcess)
        Deferred.unsafeDone(exitDeferred, Exit.void)
      })
+
+      childProcess.send(['setup-parent-death-detection', { parentPid: process.pid }])
+
      return Effect.as(
        Scope.addFinalizer(
          scope,
          Effect.suspend(() => {
-
+            // Try graceful shutdown first
+            try {
+              childProcess.send([1])
+            } catch {
+              // IPC channel might be closed, proceed to forceful termination
+            }
+
            return Deferred.await(exitDeferred)
          }).pipe(
-            Effect.timeout(
+            Effect.timeout(3000), // Reduced timeout for faster cleanup
            Effect.interruptible,
-            Effect.catchAllCause(() =>
+            Effect.catchAllCause(() =>
+              Effect.sync(() => {
+                // Enhanced cleanup with escalating signals
+                if (!childProcess.killed) {
+                  try {
+                    // First try SIGTERM
+                    childProcess.kill('SIGTERM')
+
+                    // If still running after a short delay, use SIGKILL
+                    setTimeout(() => {
+                      if (!childProcess.killed) {
+                        childProcess.kill('SIGKILL')
+                      }
+                    }, 1000)
+                  } catch {
+                    // Process might already be dead
+                  } finally {
+                    // Ensure it's removed from tracking
+                    childProcesses.delete(childProcess)
+                  }
+                }
+              }),
+            ),
          ),
        ),
        {
@@ -56,5 +148,21 @@ export const layerWorker = Layer.succeed(Worker.PlatformWorker, platformWorkerIm
 
 export const layerManager = Layer.provide(Worker.layerManager, layerWorker)
 
+/**
+ * @example
+ * ```ts
+ * import * as ChildProcess from 'node:child_process'
+ * import { Effect, Worker } from '@effect/platform/Worker'
+ * import { ChildProcessWorker } from '@livestore/utils/node'
+ *
+ * Worker.makePoolSerialized<WorkerMessage>({
+ *   size: 1,
+ *   initialMessage: () => new InitialMessage({ name: 'test', data: new Uint8Array([1, 2, 3]) }),
+ * }).pipe(
+ *   Effect.provide(ChildProcessWorker.layer(() => ChildProcess.fork(new URL('worker.ts', import.meta.url)))),
+ * )
+ * ```
+ *
+ */
 export const layer = (spawn: (id: number) => ChildProcess.ChildProcess) =>
   Layer.merge(layerManager, Worker.layerSpawner(spawn))