libvirt_ffi 0.1.0
- checksums.yaml +7 -0
- data/.gitignore +10 -0
- data/.travis.yml +6 -0
- data/CODE_OF_CONDUCT.md +74 -0
- data/Gemfile +13 -0
- data/LICENSE.txt +21 -0
- data/README.md +44 -0
- data/Rakefile +10 -0
- data/bin/console +14 -0
- data/bin/setup +8 -0
- data/exe/libvirt +3 -0
- data/lib/libvirt/connection.rb +107 -0
- data/lib/libvirt/domain.rb +29 -0
- data/lib/libvirt/error.rb +6 -0
- data/lib/libvirt/event.rb +124 -0
- data/lib/libvirt/ffi/common.rb +15 -0
- data/lib/libvirt/ffi/connection.rb +26 -0
- data/lib/libvirt/ffi/domain.rb +95 -0
- data/lib/libvirt/ffi/event.rb +104 -0
- data/lib/libvirt/ffi/libvirt.rb +20 -0
- data/lib/libvirt/lib_version.rb +13 -0
- data/lib/libvirt/util.rb +34 -0
- data/lib/libvirt/version.rb +5 -0
- data/lib/libvirt.rb +40 -0
- data/lib/libvirt_ffi.rb +1 -0
- data/libvirt.gemspec +28 -0
- data/test_usage/support/libvirt_async.rb +534 -0
- data/test_usage/support/log_formatter.rb +38 -0
- data/test_usage/test_event_loop.rb +60 -0
- metadata +89 -0
data/test_usage/support/libvirt_async.rb @@ -0,0 +1,534 @@

```ruby
module LibvirtAsync
  module WithDbg
    extend ActiveSupport::Concern

    class_methods do
      def dbg(progname = nil, &block)
        Libvirt.logger.debug(progname || "#{name}.:0x#{object_id.to_s(16)}", &block)
      end
    end

    private

    def dbg(progname = nil, &block)
      Libvirt.logger.debug(progname || "#{self.class}#:0x#{object_id.to_s(16)}", &block)
    end
  end

  module Util
    def create_task(parent = nil, reactor = nil, &block)
      parent = Async::Task.current? if parent == :current
      reactor ||= Async::Task.current.reactor
      Async::Task.new(reactor, parent, &block)
    end
    module_function :create_task
  end

  class Handle
    # Represents an event handle (usually a file descriptor). When an event
    # happens to the handle, we dispatch the event to libvirt via
    # Libvirt::event_invoke_handle_callback (feeding it the handle_id we returned
    # from add_handle, the file descriptor, the new events, and the opaque
    # data that libvirt gave us earlier).

    class Monitor < Async::Wrapper
      def close
        cancel_monitor
      end

      def readiness
        monitor&.readiness
      end

      def to_s
        "#<#{self.class}:0x#{object_id.to_s(16)} readable=#{@readable&.object_id&.to_s(16)} writable=#{@writable&.object_id&.to_s(16)} alive=#{@monitor && !@monitor.closed?}>"
      end

      def inspect
        to_s
      end
    end

    include WithDbg

    attr_reader :handle_id, :fd, :opaque, :monitor
    attr_accessor :events

    def initialize(handle_id, fd, events, opaque)
      dbg { "#{self.class}#initialize handle_id=#{handle_id}, fd=#{fd}, events=#{events}" }

      @handle_id = handle_id
      @fd = fd
      @events = events
      @opaque = opaque
      @monitor = nil
    end

    def register
      dbg { "#{self.class}#register handle_id=#{handle_id}, fd=#{fd}" }

      if (events & Libvirt::EVENT_HANDLE_ERROR) != 0
        dbg { "#{self.class}#register skip EVENT_HANDLE_ERROR handle_id=#{handle_id}, fd=#{fd}" }
      end
      if (events & Libvirt::EVENT_HANDLE_HANGUP) != 0
        dbg { "#{self.class}#register skip EVENT_HANDLE_HANGUP handle_id=#{handle_id}, fd=#{fd}" }
      end

      interest = events_to_interest(events)
      dbg { "#{self.class}#register parse handle_id=#{handle_id}, fd=#{fd}, events=#{events}, interest=#{interest}" }

      if interest.nil?
        dbg { "#{self.class}#register no interest handle_id=#{handle_id}, fd=#{fd}" }
        return
      end

      task = Util.create_task do
        dbg { "#{self.class}#register_handle Async start handle_id=#{handle_id}, fd=#{fd}" }
        io_mode = interest_to_io_mode(interest)

        io = IO.new(fd, io_mode, autoclose: false)
        @monitor = Monitor.new(io)

        while @monitor.readiness == nil
          cancelled = wait_io(interest)

          if cancelled
            dbg { "#{self.class}#register_handle async cancel handle_id=#{handle_id}, fd=#{fd}" }
            break
          end

          dbg { "#{self.class}#register_handle async resumes readiness=#{@monitor.readiness}, handle_id=#{handle_id}, fd=#{fd}" }
          events = readiness_to_events(@monitor.readiness)

          unless events.nil?
            dispatch(events)
            break
          end

          dbg { "#{self.class}#register_handle async not ready readiness=#{@monitor.readiness}, handle_id=#{handle_id}, fd=#{fd}" }
        end
      end

      dbg { "#{self.class}#register_handle invokes fiber=0x#{task.fiber.object_id.to_s(16)} handle_id=#{handle_id}, fd=#{fd}" }
      task.run
      dbg { "#{self.class}#register_handle ends handle_id=#{handle_id}, fd=#{fd}" }
    end

    def unregister
      dbg { "#{self.class}#unregister handle_id=#{handle_id}, fd=#{fd}" }

      if @monitor.nil?
        dbg { "#{self.class}#unregister already unregistered handle_id=#{handle_id}, fd=#{fd}" }
        return
      end

      @monitor.close
      @monitor = nil
    end

    def to_s
      "#<#{self.class}:0x#{object_id.to_s(16)} handle_id=#{handle_id} fd=#{fd} events=#{events} monitor=#{monitor}>"
    end

    def inspect
      to_s
    end

    private

    def dispatch(events)
      dbg { "#{self.class}#dispatch starts handle_id=#{handle_id}, events=#{events}, fd=#{fd}" }

      task = Util.create_task do
        dbg { "#{self.class}#dispatch async starts handle_id=#{handle_id} events=#{events}, fd=#{fd}" }
        # Libvirt::event_invoke_handle_callback(handle_id, fd, events, opaque)
        # opaque.call_cb(handle_id, fd, events)
        Libvirt::Event.invoke_handle_callback(handle_id, fd, events, opaque)
        dbg { "#{self.class}#dispatch async ends handle_id=#{handle_id} received_events=#{events}, fd=#{fd}" }
      end
      # dbg { "#{self.class}#dispatch invokes fiber=0x#{task.fiber.object_id.to_s(16)} handle_id=#{handle_id}, events=#{events}, fd=#{fd}" }
      # task.run
      # dbg { "#{self.class}#dispatch ends handle_id=#{handle_id}, events=#{events}, fd=#{fd}" }
      dbg { "#{self.class}#dispatch schedules fiber=0x#{task.fiber.object_id.to_s(16)} handle_id=#{handle_id}, events=#{events}, fd=#{fd}" }
      task.reactor << task.fiber
    end

    def wait_io(interest)
      meth = interest_to_monitor_method(interest)
      begin
        @monitor.public_send(meth)
        false
      rescue Monitor::Cancelled => e
        dbg { "#{self.class}#wait_io cancelled #{e.class} #{e.message}" }
        true
      end
    end

    def interest_to_monitor_method(interest)
      case interest
      when :r
        :wait_readable
      when :w
        :wait_writable
      when :rw
        :wait_any
      else
        raise ArgumentError, "invalid interest #{interest}"
      end
    end

    def events_to_interest(events)
      readable = (events & Libvirt::EVENT_HANDLE_READABLE) != 0
      writable = (events & Libvirt::EVENT_HANDLE_WRITABLE) != 0
      if readable && writable
        :rw
      elsif readable
        :r
      elsif writable
        :w
      else
        nil
      end
    end

    def interest_to_io_mode(interest)
      case interest
      when :rw
        'a+'
      when :r
        'r'
      when :w
        'w'
      else
        raise ArgumentError, "invalid interest #{interest}"
      end
    end

    def readiness_to_events(readiness)
      case readiness&.to_sym
      when :rw
        Libvirt::EVENT_HANDLE_READABLE | Libvirt::EVENT_HANDLE_WRITABLE
      when :r
        Libvirt::EVENT_HANDLE_READABLE
      when :w
        Libvirt::EVENT_HANDLE_WRITABLE
      else
        nil
      end
    end
  end

  class Timer
    # Represents a timer. When a timer expires, we dispatch the event to
    # libvirt via Libvirt::event_invoke_timeout_callback (feeding it the timer_id
    # we returned from add_timer and the opaque data that libvirt gave us
    # earlier).

    class Monitor
      class Cancelled < StandardError
        def initialize
          super('was cancelled')
        end
      end

      attr_reader :fiber

      def initialize
        @fiber = nil
      end

      def wait(timeout)
        @fiber = Async::Task.current.fiber
        Async::Task.current.sleep(timeout)
        @fiber = nil
      end

      def close
        @fiber.resume(Cancelled.new) if @fiber&.alive?
        @fiber = nil
      end

      def to_s
        "#<#{self.class}:0x#{object_id.to_s(16)} fiber=#{@fiber&.object_id&.to_s(16)} alive=#{@fiber&.alive?}>"
      end

      def inspect
        to_s
      end
    end

    include WithDbg

    attr_reader :timer_id, :opaque, :monitor
    attr_accessor :last_fired, :interval

    def initialize(timer_id, interval, opaque)
      dbg { "#{self.class}#initialize timer_id=#{timer_id}, interval=#{interval}" }

      @timer_id = timer_id
      @interval = interval.to_f / 1000.to_f
      @opaque = opaque
      @last_fired = Time.now.to_f
      @monitor = nil
    end

    def wait_time
      return if interval < 0
      last_fired + interval
    end

    def register
      dbg { "#{self.class}#register starts timer_id=#{timer_id}, interval=#{interval}" }

      if wait_time.nil?
        dbg { "#{self.class}#register no wait time timer_id=#{timer_id}, interval=#{interval}" }
        return
      end

      task = Util.create_task do
        dbg { "#{self.class}#register async starts timer_id=#{timer_id}, interval=#{interval}" }
        now_time = Time.now.to_f
        timeout = wait_time > now_time ? wait_time - now_time : 0
        @monitor = Monitor.new
        cancelled = wait_timer(timeout)

        if cancelled
          dbg { "#{self.class}#register async cancel timer_id=#{timer_id}, interval=#{interval}" }
        else
          dbg { "#{self.class}#register async ready timer_id=#{timer_id}, interval=#{interval}" }
          self.last_fired = Time.now.to_f
          dispatch
        end
      end

      dbg { "#{self.class}#register invokes fiber=0x#{task.fiber.object_id.to_s(16)} timer_id=#{timer_id}, interval=#{interval}" }
      task.run
      dbg { "#{self.class}#register ends timer_id=#{timer_id}, interval=#{interval}" }
    end

    def unregister
      dbg { "#{self.class}#unregister_timer timer_id=#{timer_id}, interval=#{interval}" }

      if @monitor.nil?
        dbg { "#{self.class}#unregister_timer already unregistered timer_id=#{timer_id}, interval=#{interval}" }
        return
      end

      @monitor.close
      @monitor = nil
    end

    def to_s
      "#<#{self.class}:0x#{object_id.to_s(16)} timer_id=#{timer_id} interval=#{interval} last_fired=#{last_fired} monitor=#{monitor}>"
    end

    def inspect
      to_s
    end

    private

    def dispatch
      dbg { "#{self.class}#dispatch starts timer_id=#{timer_id}, interval=#{interval}" }

      task = Util.create_task do
        dbg { "#{self.class}#dispatch async starts timer_id=#{timer_id}, interval=#{interval}" }
        # Libvirt::event_invoke_timeout_callback(timer_id, opaque)
        # opaque.call_cb(timer_id)
        Libvirt::Event.invoke_timeout_callback(timer_id, opaque)
        dbg { "#{self.class}#dispatch async ends timer_id=#{timer_id}, interval=#{interval}" }
      end

      # dbg { "#{self.class}#dispatch invokes fiber=0x#{task.fiber.object_id.to_s(16)} timer_id=#{timer_id}, interval=#{interval}" }
      # task.run
      # dbg { "#{self.class}#dispatch ends timer_id=#{timer_id}, interval=#{interval}" }
      dbg { "#{self.class}#dispatch schedules fiber=0x#{task.fiber.object_id.to_s(16)} timer_id=#{timer_id}, interval=#{interval}" }
      task.reactor << task.fiber
    end

    def wait_timer(timeout)
      begin
        @monitor.wait(timeout)
        false
      rescue Monitor::Cancelled => e
        dbg { "#{self.class}#wait_timer cancelled #{e.class} #{e.message}" }
        true
      end
    end
  end

  class Implementations
    include WithDbg

    def initialize
      dbg { "#{self.class}#initialize" }

      default_variables
    end

    def start
      dbg { "#{self.class}#start" }

      register_implementations
    end

    def stop
      dbg { "#{self.class}#stop" }

      @handles.each(&:unregister)
      @timers.each(&:unregister)

      default_variables
    end

    def print_debug_info
      str = [
        "#{self.class}:0x#{object_id.to_s(16)}",
        "handles = [",
        @handles.map(&:to_s).join("\n"),
        "]",
        "timers = [",
        @timers.map(&:to_s).join("\n"),
        "]"
      ].join("\n")
      Libvirt.logger&.debug { str }
    end

    def to_s
      "#<#{self.class}:0x#{object_id.to_s(16)} handles=#{@handles} timers=#{@timers}>"
    end

    def inspect
      to_s
    end

    private

    def default_variables
      @next_handle_id = 1
      @next_timer_id = 1
      @handles = []
      @timers = []
    end

    def register_implementations
      dbg { "#{self.class}#register_implementations" }

      Libvirt::Event.register(
        add_handle: method(:add_handle).to_proc,
        update_handle: method(:update_handle).to_proc,
        remove_handle: method(:remove_handle).to_proc,
        add_timer: method(:add_timer).to_proc,
        update_timer: method(:update_timer).to_proc,
        remove_timer: method(:remove_timer).to_proc
      )
    end

    def add_handle(fd, events, opaque)
      # Add a handle to be tracked by this object. The application is
      # expected to maintain a list of internal handle IDs (integers); this
      # callback *must* return the current handle_id. This handle_id is used
      # both by libvirt to identify the handle (during an update or remove
      # callback), and is also passed by the application into libvirt when
      # dispatching an event. The application *must* also store the opaque
      # data given by libvirt, and return it back to libvirt later
      # (see remove_handle).
      dbg { "#{self.class}#add_handle starts fd=#{fd}, events=#{events}" }

      @next_handle_id += 1
      handle_id = @next_handle_id
      handle = LibvirtAsync::Handle.new(handle_id, fd, events, opaque)
      @handles << handle
      handle.register

      dbg { "#{self.class}#add_handle ends fd=#{fd}, events=#{events}" }
      handle_id
    end

    def update_handle(handle_id, events)
      # Update a previously registered handle. Libvirt tells us the handle_id
      # (which was returned to libvirt via add_handle) and the new events. It
      # is our responsibility to find the correct handle and update the events
      # it cares about.
      dbg { "#{self.class}#update_handle starts handle_id=#{handle_id}, events=#{events}" }

      handle = @handles.detect { |h| h.handle_id == handle_id }
      handle.events = events
      handle.unregister
      handle.register

      dbg { "#{self.class}#update_handle ends handle_id=#{handle_id}, events=#{events}" }
      nil
    end

    def remove_handle(handle_id)
      # Remove a previously registered handle. Libvirt tells us the handle_id
      # (which was returned to libvirt via add_handle), and it is our
      # responsibility to "forget" the handle. We must return the opaque data
      # that libvirt handed us in add_handle, otherwise we will leak memory.
      dbg { "#{self.class}#remove_handle starts handle_id=#{handle_id}" }

      idx = @handles.index { |h| h.handle_id == handle_id }
      handle = @handles.delete_at(idx)
      handle.unregister

      dbg { "#{self.class}#remove_handle ends handle_id=#{handle_id}" }
      handle.opaque
    end

    def add_timer(interval, opaque)
      # Add a timeout to be tracked by this object. The application is
      # expected to maintain a list of internal timer IDs (integers); this
      # callback *must* return the current timer_id. This timer_id is used
      # both by libvirt to identify the timeout (during an update or remove
      # callback), and is also passed by the application into libvirt when
      # dispatching an event. The application *must* also store the opaque
      # data given by libvirt, and return it back to libvirt later
      # (see remove_timer).
      dbg { "#{self.class}#add_timer starts interval=#{interval}" }

      @next_timer_id += 1
      timer_id = @next_timer_id
      timer = LibvirtAsync::Timer.new(timer_id, interval, opaque)
      @timers << timer
      timer.register

      dbg { "#{self.class}#add_timer ends interval=#{interval}" }
      timer_id
    end

    def update_timer(timer_id, interval)
      # Update a previously registered timer. Libvirt tells us the timer_id
      # (which was returned to libvirt via add_timer) and the new interval. It
      # is our responsibility to find the correct timer and update the interval
      # it fires at.
      dbg { "#{self.class}#update_timer starts timer_id=#{timer_id}, interval=#{interval}" }

      timer = @timers.detect { |t| t.timer_id == timer_id }
      dbg { "#{self.class}#update_timer updating timer_id=#{timer.timer_id}" }
      timer.interval = interval
      timer.unregister
      timer.register

      dbg { "#{self.class}#update_timer ends timer_id=#{timer_id}, interval=#{interval}" }
      nil
    end

    def remove_timer(timer_id)
      # Remove a previously registered timeout. Libvirt tells us the timer_id
      # (which was returned to libvirt via add_timer), and it is our
      # responsibility to "forget" the timer. We must return the opaque data
      # that libvirt handed us in add_timer, otherwise we will leak memory.
      dbg { "#{self.class}#remove_timer starts timer_id=#{timer_id}" }

      idx = @timers.index { |t| t.timer_id == timer_id }
      timer = @timers.delete_at(idx)
      timer.unregister

      dbg { "#{self.class}#remove_timer ends timer_id=#{timer_id}" }
      timer.opaque
    end
  end
end
```
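For orientation, the two `dispatch` methods above hand events back to libvirt through `Libvirt::Event.invoke_handle_callback` and `Libvirt::Event.invoke_timeout_callback`; the Async tasks only decide when that happens. A minimal blocking sketch of the same round trip is shown below. It is illustrative only, not part of the gem: `read_ios` (a map from `IO` objects to `[handle_id, opaque]` pairs) is a hypothetical name, and it assumes the six hooks were already installed via `Libvirt::Event.register` as in `Implementations#register_implementations`.

```ruby
# Illustrative sketch only. A plain select loop that performs the same
# dispatch Handle#dispatch schedules on the Async reactor.
loop do
  ready, = IO.select(read_ios.keys, nil, nil, 1)
  (ready || []).each do |io|
    handle_id, opaque = read_ios[io]
    # Feed the event straight back to libvirt, as Handle#dispatch does.
    Libvirt::Event.invoke_handle_callback(
      handle_id, io.fileno, Libvirt::EVENT_HANDLE_READABLE, opaque
    )
  end
end
```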
data/test_usage/support/log_formatter.rb @@ -0,0 +1,38 @@

```ruby
class LogFormatter
  LOG_FORMAT = "%s, %s [%s/%s/%s] %s\n".freeze
  DEFAULT_DATETIME_FORMAT = "%F %T.%N".freeze

  attr_accessor :datetime_format

  def initialize
    @datetime_format = nil
  end

  def call(severity, time, progname, message)
    LOG_FORMAT % [
      severity[0..0],
      format_datetime(time),
      "0x#{Async::Task.current?&.object_id&.to_s(16)}",
      "0x#{Fiber.current.object_id.to_s(16)}",
      progname,
      format_message(message)
    ]
  end

  private

  def format_datetime(time)
    time.strftime(@datetime_format || DEFAULT_DATETIME_FORMAT)
  end

  def format_message(message)
    case message
    when ::String
      message
    when ::Exception
      "<#{message.class}>:#{message.message}\n#{(message.backtrace || []).join("\n")}"
    else
      message.inspect
    end
  end
end
```
data/test_usage/test_event_loop.rb @@ -0,0 +1,60 @@

```ruby
#!/usr/bin/env ruby

require 'bundler/setup'
require 'libvirt'
require 'logger'
require 'active_support/all'
require 'async'
require 'get_process_mem'

require_relative 'support/libvirt_async'
require_relative 'support/log_formatter'

Libvirt.logger = Logger.new(STDOUT, formatter: LogFormatter.new)
Libvirt.logger.level = ENV['DEBUG'] ? :debug : :info

IMPL = LibvirtAsync::Implementations.new
CONNS = []
DOMS = []

Async do
  ASYNC_REACTOR = Async::Task.current.reactor

  IMPL.start

  c = Libvirt::Connection.new('qemu+tcp://localhost:16510/system')
  c.open
  res = c.set_keep_alive(2, 1)
  Libvirt.logger.info { "set_keep_alive #{res}" }
  CONNS.push(c)

  c.register_domain_event_callback(Libvirt::DOMAIN_EVENT_ID_LIFECYCLE, nil) do |dom, event, detail, opaque|
    Libvirt.logger.info { "DOMAIN_EVENT_ID_LIFECYCLE user dom=#{dom}, event=#{event}, detail=#{detail}, opaque=#{opaque}" }
  end

  puts "domains qty #{c.list_all_domains_qty}"

  domains = c.list_all_domains
  DOMS.concat(domains)
  puts "Domains (#{domains.size}): #{domains}"

  domains.each_with_index do |domain, index|
    c.register_domain_event_callback(Libvirt::DOMAIN_EVENT_ID_LIFECYCLE, domain) do |dom, event, detail, opaque|
      Libvirt.logger.info { "DOMAIN_EVENT_CALLBACK LIFECYCLE user##{index} dom=#{dom}, event=#{event}, detail=#{detail}, opaque=#{opaque}" }
    end
  end

  res = domains.first.get_state
  Libvirt.logger.info { "Domain #{domains.first} state #{res}" }

  # ASYNC_REACTOR.every(10) do
  #   LibvirtAsync::Util.create_task(nil, ASYNC_REACTOR) { IMPL.print_debug_info }.run
  # end

  ASYNC_REACTOR.every(5) do
    Libvirt.logger.info { "MEM USAGE: #{GetProcessMem.new.mb} MB" }
    Libvirt.logger.info { "GC.start" }
    GC.start
    Libvirt.logger.info { "MEM USAGE: #{GetProcessMem.new.mb} MB" }
  end
end
```
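Presumably this demo is run as `ruby test_usage/test_event_loop.rb` (with `DEBUG=1` for verbose output) against a libvirtd that accepts TCP connections at the `qemu+tcp://localhost:16510/system` URI assumed above; the `ASYNC_REACTOR.every(5)` block then logs process memory before and after each forced GC pass so that leaks in the handle/timer bookkeeping would show up over time.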