async-container-supervisor 0.1.0 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/lib/async/container/supervisor/client.rb +30 -87
- data/lib/async/container/supervisor/connection.rb +68 -10
- data/lib/async/container/supervisor/dispatchable.rb +30 -0
- data/lib/async/container/supervisor/environment.rb +1 -1
- data/lib/async/container/supervisor/memory_monitor.rb +6 -1
- data/lib/async/container/supervisor/server.rb +30 -10
- data/lib/async/container/supervisor/version.rb +1 -1
- data/lib/async/container/supervisor/worker.rb +87 -0
- data/lib/async/container/supervisor.rb +1 -0
- data.tar.gz.sig +0 -0
- metadata +4 -16
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: b5936025301bd2c10f66b6e1348207c6f59e94e41c9a5ac4705fe3cd70d6ed90
+  data.tar.gz: 19aea49ee76c918c3192acb05a6fb2ac87cdb73cb75c95965b716bcf9f571c56
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 8cf7aa51fb4dd4947d43b37db76eb990ad06be54d5e9c1fa67016881cc80ef7d703aec514ca4be6243b33763dca221f2a8fe5f957c2fc0566d35cc893f0f3b10
+  data.tar.gz: 39d5d9b0ef7913af202d0ff7fc62b31afcbe3355075b21093deb6554fcc82a4a63a6ba68b0d5a67e91c4e367a873e616d124830fe87a535289bcfad5be003565
checksums.yaml.gz.sig
CHANGED
Binary file
data/lib/async/container/supervisor/client.rb
CHANGED
@@ -3,120 +3,63 @@
 # Released under the MIT License.
 # Copyright, 2025, by Samuel Williams.
 
-require "io/stream"
 require_relative "connection"
+require_relative "dispatchable"
 
 module Async
 	module Container
 		module Supervisor
+			# A client provides a mechanism to connect to a supervisor server in order to execute operations.
 			class Client
-				def
-
+				def initialize(endpoint: Supervisor.endpoint)
+					@endpoint = endpoint
 				end
 				
-
-
-
+				include Dispatchable
+				
+				protected def connect!
+					peer = @endpoint.connect
+					return Connection.new(peer, 0)
 				end
 				
-
-
-
+				# Called when a connection is established.
+				protected def connected!(connection)
+					# Do nothing by default.
 				end
 				
+				# Connect to the server.
 				def connect
-
-
-					stream = IO::Stream(peer)
-					@connection = Connection.new(stream, 0, instance: @instance)
-
-					# Register the instance with the server:
-					Async do
-						@connection.call(do: :register, state: @instance)
-					end
-				end
+					connection = connect!
+					connection.run_in_background(self)
 
-
+					connected!(connection)
+					
+					return connection unless block_given?
 
 					begin
-						yield
+						yield connection
 					ensure
-						@connection.close
-					end
-				end
-
-				def close
-					if connection = @connection
-						@connection = nil
 						connection.close
 					end
 				end
 				
-
-				if path = call[:path]
-					File.open(path, "w") do |file|
-						yield file
-					end
-
-					call.finish(path: path)
-				else
-					buffer = StringIO.new
-					yield buffer
-
-					call.finish(data: buffer.string)
-				end
-			end
-
-			def do_scheduler_dump(call)
-				dump(call) do |file|
-					Fiber.scheduler.print_hierarchy(file)
-				end
-			end
-
-			def do_memory_dump(call)
-				require "objspace"
-
-				dump(call) do |file|
-					ObjectSpace.dump_all(output: file)
-				end
-			end
-
-			def do_thread_dump(call)
-				dump(call) do |file|
-					Thread.list.each do |thread|
-						file.puts(thread.inspect)
-						file.puts(thread.backtrace)
-					end
-				end
-			end
-
-			def do_garbage_profile_start(call)
-				GC::Profiler.enable
-				call.finish(started: true)
-			end
-
-			def do_garbage_profile_stop(call)
-				GC::Profiler.disable
-
-				dump(connection, message) do |file|
-					file.puts GC::Profiler.result
-				end
-			end
-
+				# Run the client in a loop, reconnecting if necessary.
 				def run
-					Async do
+					Async do
 						loop do
-
-
+							connection = connect!
+							
+							Async do
+								connected!(connection)
 							end
-						rescue => error
-							Console.error(self, "Unexpected error while running client!", exception: error)
 
-
+							connection.run(self)
+						rescue => error
+							Console.error(self, "Connection failed:", exception: error)
 							sleep(rand)
+						ensure
+							connection.close
 						end
-					ensure
-						task.stop
 					end
 				end
 			end
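The reworked Client no longer registers any state itself; it only establishes a Connection (via connect!/connected!) and either returns it or yields it to a block, closing it afterwards. A minimal usage sketch, assuming a supervisor is already listening on the default endpoint and that Connection#call behaves as it is used elsewhere in this diff; the :status operation is provided by the new Server#do_status further below:

require "async"
require "async/container/supervisor"

Async do
	client = Async::Container::Supervisor::Client.new
	
	# With a block, the connection is yielded and closed automatically:
	client.connect do |connection|
		response = connection.call(do: :status)
		Console.info(client, "Supervisor status:", response: response)
	end
end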
data/lib/async/container/supervisor/connection.rb
CHANGED
@@ -18,6 +18,14 @@ module Async
 						@queue = ::Thread::Queue.new
 					end
 					
+					def as_json(...)
+						@message
+					end
+					
+					def to_json(...)
+						as_json.to_json(...)
+					end
+					
 					# @attribute [Connection] The connection that initiated the call.
 					attr :connection
 					
@@ -36,6 +44,11 @@ module Async
 						@queue.pop(...)
 					end
 					
+					# The call was never completed and the connection itself was closed.
+					def close
+						@queue.close
+					end
+					
 					def each(&block)
 						while response = self.pop
 							yield response
@@ -47,6 +60,10 @@ module Async
 						@queue.close
 					end
 					
+					def fail(**response)
+						self.finish(failed: true, **response)
+					end
+					
 					def closed?
 						@queue.closed?
 					end
@@ -76,23 +93,45 @@ module Async
 						call = self.new(connection, id, message)
 						
 						connection.calls[id] = call
-
-
-
-
-
-
+						begin
+							connection.write(id: id, **message)
+							
+							if block_given?
+								call.each(&block)
+							else
+								intermediate = nil
+								
+								while response = call.pop
+									if response.delete(:finished)
+										if intermediate
+											if response.any?
+												intermediate << response
+											end
+											
+											return intermediate
+										else
+											return response
+										end
+									else
+										# Buffer intermediate responses:
+										intermediate ||= []
+										intermediate << response
+									end
+								end
+							end
+						ensure
+							connection.calls.delete(id)
 						end
 					end
 				end
 				
-				def initialize(stream, id, **state)
+				def initialize(stream, id = 0, **state)
 					@stream = stream
+					@id = id
 					@state = state
 					
+					@reader = nil
 					@calls = {}
-
-					@id = id
 				end
 				
 				# @attribute [Hash(Integer, Call)] Calls in progress.
@@ -139,7 +178,7 @@ module Async
 				
 				def run(target)
 					self.each do |message|
-						if id = message
+						if id = message.delete(:id)
 							if call = @calls[id]
 								# Response to a call:
 								call.push(**message)
@@ -153,11 +192,30 @@ module Async
 					end
 				end
 				
+				def run_in_background(target, parent: Task.current)
+					@reader ||= parent.async do
+						self.run(target)
+					end
+				end
+				
 				def close
+					if @reader
+						@reader.stop
+						@reader = nil
+					end
+					
 					if stream = @stream
 						@stream = nil
 						stream.close
 					end
+					
+					if @calls
+						@calls.each do |id, call|
+							call.close
+						end
+						
+						@calls.clear
+					end
 				end
 			end
 		end
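The buffering added to Call.call means a remote handler may push any number of intermediate responses before finishing; the caller then receives either the final payload alone or an array of the intermediates followed by the final payload. A rough sketch of that flow with a hypothetical :report operation, using the handler-side API as it appears in MemoryMonitor#status and Server#do_status below:

# Handler side, mixed into a server or worker via Dispatchable:
def do_report(call)
	call.push(progress: 0.5)        # buffered by the caller as an intermediate response
	call.finish(result: "complete") # marks the call as finished
end

# Caller side:
# connection.call(do: :report)  # => [{progress: 0.5}, {result: "complete"}]
# connection.call(do: :status)  # => {...} (a single finished response is returned directly)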
data/lib/async/container/supervisor/dispatchable.rb
ADDED
@@ -0,0 +1,30 @@
+# frozen_string_literal: true
+
+# Released under the MIT License.
+# Copyright, 2025, by Samuel Williams.
+
+require_relative "connection"
+require_relative "endpoint"
+
+require "io/stream"
+
+module Async
+	module Container
+		module Supervisor
+			module Dispatchable
+				def dispatch(call)
+					method_name = "do_#{call.message[:do]}"
+					self.public_send(method_name, call)
+				rescue => error
+					Console.error(self, "Error while dispatching call.", exception: error, call: call)
+					
+					call.fail(error: {
+						class: error.class,
+						message: error.message,
+						backtrace: error.backtrace,
+					})
+				end
+			end
+		end
+	end
+end
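Dispatchable extracts the dispatch logic that was previously inlined in Server: it maps a call's :do field to a matching do_* method and, on failure, logs the exception and reports it back to the caller via call.fail. A hypothetical handler, for illustration only:

class EchoHandler
	include Async::Container::Supervisor::Dispatchable
	
	# Invoked for messages like {do: :echo, text: "hello"}:
	def do_echo(call)
		call.finish(text: call.message[:text])
	end
end

# connection.run(EchoHandler.new) routes incoming calls to #do_echo; any raised
# exception is logged and returned to the caller as failed: true with error details.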
data/lib/async/container/supervisor/memory_monitor.rb
CHANGED
@@ -10,7 +10,7 @@ module Async
 	module Container
 		module Supervisor
 			class MemoryMonitor
-				def initialize(interval: 10, limit: nil)
+				def initialize(interval: 10, limit: nil, &block)
 					@interval = interval
 					@cluster = Memory::Leak::Cluster.new(limit: limit)
 					@processes = Hash.new{|hash, key| hash[key] = Set.new.compare_by_identity}
@@ -42,6 +42,10 @@ module Async
 					end
 				end
 				
+				def status(call)
+					call.push(memory_monitor: @cluster)
+				end
+				
 				def run
 					Async do
 						while true
@@ -54,6 +58,7 @@ module Async
 							
 							response = connection.call(do: :memory_dump, path: path, timeout: 30)
 							Console.info(self, "Memory dump saved to:", path, response: response)
+							@block.call(response) if @block
 						end
 						
 						# Kill the process:
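The memory monitor's initializer now accepts a block, and the new @block.call(response) hook fires after a memory dump has been captured for a leaking worker. The hunk above does not show where the block is stored, so the sketch below assumes the initializer assigns it to @block; the 3 GiB limit is illustrative:

monitor = Async::Container::Supervisor::MemoryMonitor.new(interval: 10, limit: 3 * 1024 ** 3) do |response|
	# Called with the memory dump response, e.g. to upload the dump for offline analysis:
	Console.warn("memory-monitor", "Captured memory dump.", response: response)
end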
data/lib/async/container/supervisor/server.rb
CHANGED
@@ -5,24 +5,25 @@
 
 require_relative "connection"
 require_relative "endpoint"
+require_relative "dispatchable"
 
 require "io/stream"
 
 module Async
 	module Container
 		module Supervisor
+			# The server represents the main supervisor process which is responsible for managing the lifecycle of other processes.
+			#
+			# There are various tasks that can be executed by the server, such as restarting the process group, and querying the status of the processes. The server is also responsible for managing the lifecycle of the monitors, which can be used to monitor the status of the connected workers.
 			class Server
-				def initialize(endpoint
-					@endpoint = endpoint
+				def initialize(monitors: [], endpoint: Supervisor.endpoint)
 					@monitors = monitors
+					@endpoint = endpoint
 				end
 				
 				attr :monitors
 				
-
-					method_name = "do_#{call.message[:do]}"
-					self.public_send(method_name, call)
-				end
+				include Dispatchable
 				
 				def do_register(call)
 					call.connection.state.merge!(call.message[:state])
@@ -38,6 +39,26 @@ module Async
 					call.finish
 				end
 				
+				# Restart the current process group, usually including the supervisor and any other processes.
+				#
+				# @parameter signal [Symbol] The signal to send to the process group.
+				def do_restart(call)
+					signal = call[:signal] || :INT
+					
+					# We are going to terminate the progress group, including *this* process, so finish the current RPC before that:
+					call.finish
+					
+					::Process.kill(signal, ::Process.ppid)
+				end
+				
+				def do_status(call)
+					@monitors.each do |monitor|
+						monitor.status(call)
+					end
+					
+					call.finish
+				end
+				
 				def remove(connection)
 					@monitors.each do |monitor|
 						begin
@@ -48,8 +69,8 @@ module Async
 					end
 				end
 				
-				def run
-
+				def run(parent: Task.current)
+					parent.async do |task|
 						@monitors.each do |monitor|
 							begin
 								monitor.run
@@ -59,8 +80,7 @@ module Async
 						end
 						
 						@endpoint.accept do |peer|
-
-							connection = Connection.new(stream, 1, remote_address: peer.remote_address)
+							connection = Connection.new(peer, 1)
 							connection.run(self)
 						ensure
 							connection.close
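A sketch of running the updated server with a monitor attached, assuming it executes inside an Async reactor (in practice the gem's service/environment wiring sets this up). Once running, a connected client can invoke the new operations remotely, e.g. connection.call(do: :status) or connection.call(do: :restart, signal: :INT):

Async do
	monitor = Async::Container::Supervisor::MemoryMonitor.new(interval: 10)
	server = Async::Container::Supervisor::Server.new(monitors: [monitor])
	
	# Starts the monitors and begins accepting worker and client connections:
	server.run
end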
data/lib/async/container/supervisor/worker.rb
ADDED
@@ -0,0 +1,87 @@
+# frozen_string_literal: true
+
+# Released under the MIT License.
+# Copyright, 2025, by Samuel Williams.
+
+require_relative "client"
+require_relative "dispatchable"
+
+module Async
+	module Container
+		module Supervisor
+			# A worker represents a long running process that can be controlled by the supervisor.
+			#
+			# There are various tasks that can be executed by the worker, such as dumping memory, threads, and garbage collection profiles.
+			class Worker < Client
+				def self.run(...)
+					self.new(...).run
+				end
+				
+				def initialize(state, endpoint: Supervisor.endpoint)
+					@state = state
+					@endpoint = endpoint
+				end
+				
+				include Dispatchable
+				
+				private def dump(call)
+					if path = call[:path]
+						File.open(path, "w") do |file|
+							yield file
+						end
+						
+						call.finish(path: path)
+					else
+						buffer = StringIO.new
+						yield buffer
+						
+						call.finish(data: buffer.string)
+					end
+				end
+				
+				def do_scheduler_dump(call)
+					dump(call) do |file|
+						Fiber.scheduler.print_hierarchy(file)
+					end
+				end
+				
+				def do_memory_dump(call)
+					require "objspace"
+					
+					dump(call) do |file|
+						ObjectSpace.dump_all(output: file)
+					end
+				end
+				
+				def do_thread_dump(call)
+					dump(call) do |file|
+						Thread.list.each do |thread|
+							file.puts(thread.inspect)
+							file.puts(thread.backtrace)
+						end
+					end
+				end
+				
+				def do_garbage_profile_start(call)
+					GC::Profiler.enable
+					call.finish(started: true)
+				end
+				
+				def do_garbage_profile_stop(call)
+					GC::Profiler.disable
+					
+					dump(connection, message) do |file|
+						file.puts GC::Profiler.result
+					end
+				end
+				
+				protected def connected!(connection)
+					super
+					
+					# Register the worker with the supervisor:
+					connection.call(do: :register, state: @state)
+				end
+			end
+		end
+	end
+end
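Worker is essentially the old client-side dump logic moved into a Client subclass: on connection it registers its state with the supervisor and then serves do_* requests (scheduler, memory, thread and GC profile dumps). A minimal sketch of attaching a containerized child process; the state hash contents are illustrative:

Async do
	state = {process_id: Process.pid, title: "my-worker"}
	
	# Connects to the supervisor, registers the state, and reconnects if the connection drops:
	Async::Container::Supervisor::Worker.run(state)
end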
data.tar.gz.sig
CHANGED
Binary file
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: async-container-supervisor
 version: !ruby/object:Gem::Version
-  version: 0.1.0
+  version: 0.3.0
 platform: ruby
 authors:
 - Samuel Williams
@@ -36,7 +36,7 @@ cert_chain:
   Q2K9NVun/S785AP05vKkXZEFYxqG6EW012U4oLcFl5MySFajYXRYbuUpH6AY+HP8
   voD0MPg1DssDLKwXyt1eKD/+Fq0bFWhwVM/1XiAXL7lyYUyOq24KHgQ2Csg=
   -----END CERTIFICATE-----
-date: 2025-02-
+date: 2025-02-27 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: async-container
@@ -80,20 +80,6 @@ dependencies:
     - - ">="
       - !ruby/object:Gem::Version
         version: '0'
-- !ruby/object:Gem::Dependency
-  name: io-stream
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
 - !ruby/object:Gem::Dependency
   name: memory-leak
   requirement: !ruby/object:Gem::Requirement
@@ -115,12 +101,14 @@ files:
 - lib/async/container/supervisor.rb
 - lib/async/container/supervisor/client.rb
 - lib/async/container/supervisor/connection.rb
+- lib/async/container/supervisor/dispatchable.rb
 - lib/async/container/supervisor/endpoint.rb
 - lib/async/container/supervisor/environment.rb
 - lib/async/container/supervisor/memory_monitor.rb
 - lib/async/container/supervisor/server.rb
 - lib/async/container/supervisor/service.rb
 - lib/async/container/supervisor/version.rb
+- lib/async/container/supervisor/worker.rb
 - license.md
 - readme.md
 - releases.md
metadata.gz.sig
CHANGED
Binary file