concurrent-ruby 1.2.2 → 1.3.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +32 -0
- data/Gemfile +6 -6
- data/README.md +2 -0
- data/Rakefile +48 -25
- data/lib/concurrent-ruby/concurrent/array.rb +3 -3
- data/lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb +23 -20
- data/lib/concurrent-ruby/concurrent/concurrent_ruby.jar +0 -0
- data/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb +4 -0
- data/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb +4 -7
- data/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb +5 -0
- data/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb +7 -0
- data/lib/concurrent-ruby/concurrent/executor/timer_set.rb +6 -2
- data/lib/concurrent-ruby/concurrent/hash.rb +5 -3
- data/lib/concurrent-ruby/concurrent/map.rb +2 -2
- data/lib/concurrent-ruby/concurrent/promises.rb +33 -23
- data/lib/concurrent-ruby/concurrent/timer_task.rb +59 -9
- data/lib/concurrent-ruby/concurrent/utility/processor_counter.rb +116 -6
- data/lib/concurrent-ruby/concurrent/version.rb +1 -1
- metadata +2 -4
- data/lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb +0 -927
- data/lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb +0 -81

data/lib/concurrent-ruby/concurrent/promises.rb CHANGED
@@ -5,6 +5,7 @@ require 'concurrent/collection/lock_free_stack'
 require 'concurrent/configuration'
 require 'concurrent/errors'
 require 'concurrent/re_include'
+require 'concurrent/utility/monotonic_time'

 module Concurrent

@@ -22,7 +23,7 @@ module Concurrent
 #
 # @!macro promises.param.args
 # @param [Object] args arguments which are passed to the task when it's executed.
-# (It might be prepended with other arguments, see the @
+# (It might be prepended with other arguments, see the @yield section).
 #
 # @!macro promises.shortcut.on
 # Shortcut of {#$0_on} with default `:io` executor supplied.
@@ -63,8 +64,8 @@ module Concurrent
 resolvable_event_on default_executor
 end

-#
-# {Promises::ResolvableEvent#resolve}.
+# Creates a resolvable event, user is responsible for resolving the event once
+# by calling {Promises::ResolvableEvent#resolve}.
 #
 # @!macro promises.param.default_executor
 # @return [ResolvableEvent]
@@ -94,7 +95,7 @@ module Concurrent
 future_on(default_executor, *args, &task)
 end

-# Constructs new Future which will be resolved after block is evaluated on default executor.
+# Constructs a new Future which will be resolved after block is evaluated on default executor.
 # Evaluation begins immediately.
 #
 # @!macro promises.param.default_executor
@@ -106,7 +107,7 @@ module Concurrent
 ImmediateEventPromise.new(default_executor).future.then(*args, &task)
 end

-# Creates resolved future with will be either fulfilled with the given value or
+# Creates a resolved future with will be either fulfilled with the given value or rejected with
 # the given reason.
 #
 # @param [true, false] fulfilled
@@ -118,7 +119,7 @@ module Concurrent
 ImmediateFuturePromise.new(default_executor, fulfilled, value, reason).future
 end

-# Creates resolved future
+# Creates a resolved future which will be fulfilled with the given value.
 #
 # @!macro promises.param.default_executor
 # @param [Object] value
@@ -127,7 +128,7 @@ module Concurrent
 resolved_future true, value, nil, default_executor
 end

-# Creates resolved future
+# Creates a resolved future which will be rejected with the given reason.
 #
 # @!macro promises.param.default_executor
 # @param [Object] reason
@@ -190,7 +191,7 @@ module Concurrent
 delay_on default_executor, *args, &task
 end

-# Creates new event or future which is resolved only after it is touched,
+# Creates a new event or future which is resolved only after it is touched,
 # see {Concurrent::AbstractEventFuture#touch}.
 #
 # @!macro promises.param.default_executor
@@ -214,7 +215,7 @@ module Concurrent
 schedule_on default_executor, intended_time, *args, &task
 end

-# Creates new event or future which is resolved in intended_time.
+# Creates a new event or future which is resolved in intended_time.
 #
 # @!macro promises.param.default_executor
 # @!macro promises.param.intended_time
@@ -240,8 +241,8 @@ module Concurrent
 zip_futures_on default_executor, *futures_and_or_events
 end

-# Creates new future which is resolved after all futures_and_or_events are resolved.
-# Its value is array of zipped future values. Its reason is array of reasons for rejection.
+# Creates a new future which is resolved after all futures_and_or_events are resolved.
+# Its value is an array of zipped future values. Its reason is an array of reasons for rejection.
 # If there is an error it rejects.
 # @!macro promises.event-conversion
 # If event is supplied, which does not have value and can be only resolved, it's
@@ -262,7 +263,7 @@ module Concurrent
 zip_events_on default_executor, *futures_and_or_events
 end

-# Creates new event which is resolved after all futures_and_or_events are resolved.
+# Creates a new event which is resolved after all futures_and_or_events are resolved.
 # (Future is resolved when fulfilled or rejected.)
 #
 # @!macro promises.param.default_executor
@@ -280,8 +281,8 @@ module Concurrent

 alias_method :any, :any_resolved_future

-# Creates new future which is resolved after first futures_and_or_events is resolved.
-# Its result equals result of the first resolved future.
+# Creates a new future which is resolved after the first futures_and_or_events is resolved.
+# Its result equals the result of the first resolved future.
 # @!macro promises.any-touch
 # If resolved it does not propagate {Concurrent::AbstractEventFuture#touch}, leaving delayed
 # futures un-executed if they are not required any more.
@@ -300,9 +301,9 @@ module Concurrent
 any_fulfilled_future_on default_executor, *futures_and_or_events
 end

-# Creates new future which is resolved after first
-# Its result equals result of the first resolved future or if all futures_and_or_events reject,
-# it has reason of the last
+# Creates a new future which is resolved after the first futures_and_or_events is fulfilled.
+# Its result equals the result of the first resolved future or if all futures_and_or_events reject,
+# it has reason of the last rejected future.
 # @!macro promises.any-touch
 # @!macro promises.event-conversion
 #
@@ -319,7 +320,7 @@ module Concurrent
 any_event_on default_executor, *futures_and_or_events
 end

-# Creates new event which becomes resolved after first
+# Creates a new event which becomes resolved after the first futures_and_or_events resolves.
 # @!macro promises.any-touch
 #
 # @!macro promises.param.default_executor
@@ -611,7 +612,7 @@ module Concurrent
 # @yieldparam [Object] value
 # @yieldparam [Object] reason
 def chain_on(executor, *args, &task)
-ChainPromise.new_blocked_by1(self,
+ChainPromise.new_blocked_by1(self, executor, executor, args, &task).future
 end

 # @return [String] Short string representation.
@@ -772,8 +773,17 @@ module Concurrent
 @Lock.synchronize do
 @Waiters.increment
 begin
-
-
+if timeout
+start = Concurrent.monotonic_time
+until resolved?
+break if @Condition.wait(@Lock, timeout) == nil # nil means timeout
+timeout -= (Concurrent.monotonic_time - start)
+break if timeout <= 0
+end
+else
+until resolved?
+@Condition.wait(@Lock, timeout)
+end
 end
 ensure
 # JRuby may raise ConcurrencyError
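The added wait loop re-checks `resolved?` on every pass and subtracts the elapsed `Concurrent.monotonic_time` from the remaining timeout. A rough usage sketch of the public API it backs (timings are illustrative; assumes the gem is installed as `concurrent-ruby`):

    require 'concurrent'

    future = Concurrent::Promises.future { sleep 2; :done }
    future.wait(0.5)   # gives up after ~0.5 s; the future is still pending
    future.resolved?   # => false
    future.value(5)    # waits up to 5 more seconds => :done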
@@ -1034,7 +1044,7 @@ module Concurrent
 # @return [Future]
 # @yield [value, *args] to the task.
 def then_on(executor, *args, &task)
-ThenPromise.new_blocked_by1(self,
+ThenPromise.new_blocked_by1(self, executor, executor, args, &task).future
 end

 # @!macro promises.shortcut.on
@@ -1052,7 +1062,7 @@ module Concurrent
 # @return [Future]
 # @yield [reason, *args] to the task.
 def rescue_on(executor, *args, &task)
-RescuePromise.new_blocked_by1(self,
+RescuePromise.new_blocked_by1(self, executor, executor, args, &task).future
 end

 # @!macro promises.method.zip

data/lib/concurrent-ruby/concurrent/timer_task.rb CHANGED
@@ -32,6 +32,17 @@ module Concurrent
 # be tested separately then passed to the `TimerTask` for scheduling and
 # running.
 #
+# A `TimerTask` supports two different types of interval calculations.
+# A fixed delay will always wait the same amount of time between the
+# completion of one task and the start of the next. A fixed rate will
+# attempt to maintain a constant rate of execution regardless of the
+# duration of the task. For example, if a fixed rate task is scheduled
+# to run every 60 seconds but the task itself takes 10 seconds to
+# complete, the next task will be scheduled to run 50 seconds after
+# the start of the previous task. If the task takes 70 seconds to
+# complete, the next task will be start immediately after the previous
+# task completes. Tasks will not be executed concurrently.
+#
 # In some cases it may be necessary for a `TimerTask` to affect its own
 # execution cycle. To facilitate this, a reference to the TimerTask instance
 # is passed as an argument to the provided block every time the task is
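A minimal sketch of the two interval types described above, with short intervals so the difference shows up quickly (illustrative only; requires the 1.3.x gem):

    require 'concurrent'

    # :fixed_delay (default): next run starts execution_interval seconds after the previous run finished.
    delay_task = Concurrent::TimerTask.new(execution_interval: 2) { sleep 0.5; puts 'delay tick' }

    # :fixed_rate: next run is scheduled execution_interval seconds after the previous run started.
    rate_task = Concurrent::TimerTask.new(execution_interval: 2, interval_type: :fixed_rate) { sleep 0.5; puts 'rate tick' }

    delay_task.execute
    rate_task.execute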
@@ -74,6 +85,12 @@ module Concurrent
 #
 # #=> 'Boom!'
 #
+# @example Configuring `:interval_type` with either :fixed_delay or :fixed_rate, default is :fixed_delay
+# task = Concurrent::TimerTask.new(execution_interval: 5, interval_type: :fixed_rate) do
+# puts 'Boom!'
+# end
+# task.interval_type #=> :fixed_rate
+#
 # @example Last `#value` and `Dereferenceable` mixin
 # task = Concurrent::TimerTask.new(
 # dup_on_deref: true,
@@ -87,7 +104,7 @@ module Concurrent
 #
 # @example Controlling execution from within the block
 # timer_task = Concurrent::TimerTask.new(execution_interval: 1) do |task|
-# task.execution_interval.times{ print 'Boom! ' }
+# task.execution_interval.to_i.times{ print 'Boom! ' }
 # print "\n"
 # task.execution_interval += 1
 # if task.execution_interval > 5
@@ -96,7 +113,7 @@ module Concurrent
 # end
 # end
 #
-# timer_task.execute
+# timer_task.execute
 # #=> Boom!
 # #=> Boom! Boom!
 # #=> Boom! Boom! Boom!
@@ -152,18 +169,30 @@ module Concurrent
 # Default `:execution_interval` in seconds.
 EXECUTION_INTERVAL = 60

-#
-
+# Maintain the interval between the end of one execution and the start of the next execution.
+FIXED_DELAY = :fixed_delay
+
+# Maintain the interval between the start of one execution and the start of the next.
+# If execution time exceeds the interval, the next execution will start immediately
+# after the previous execution finishes. Executions will not run concurrently.
+FIXED_RATE = :fixed_rate
+
+# Default `:interval_type`
+DEFAULT_INTERVAL_TYPE = FIXED_DELAY

 # Create a new TimerTask with the given task and configuration.
 #
 # @!macro timer_task_initialize
 # @param [Hash] opts the options defining task execution.
-# @option opts [
+# @option opts [Float] :execution_interval number of seconds between
 # task executions (default: EXECUTION_INTERVAL)
 # @option opts [Boolean] :run_now Whether to run the task immediately
 # upon instantiation or to wait until the first # execution_interval
 # has passed (default: false)
+# @options opts [Symbol] :interval_type method to calculate the interval
+# between executions, can be either :fixed_rate or :fixed_delay.
+# (default: :fixed_delay)
+# @option opts [Executor] executor, default is `global_io_executor`
 #
 # @!macro deref_options
 #
@@ -242,6 +271,10 @@ module Concurrent
 end
 end

+# @!attribute [r] interval_type
+# @return [Symbol] method to calculate the interval between executions
+attr_reader :interval_type
+
 # @!attribute [rw] timeout_interval
 # @return [Fixnum] Number of seconds the task can run before it is
 # considered to have failed.
@@ -264,11 +297,17 @@ module Concurrent
 set_deref_options(opts)

 self.execution_interval = opts[:execution] || opts[:execution_interval] || EXECUTION_INTERVAL
+if opts[:interval_type] && ![FIXED_DELAY, FIXED_RATE].include?(opts[:interval_type])
+raise ArgumentError.new('interval_type must be either :fixed_delay or :fixed_rate')
+end
 if opts[:timeout] || opts[:timeout_interval]
 warn 'TimeTask timeouts are now ignored as these were not able to be implemented correctly'
 end
+
 @run_now = opts[:now] || opts[:run_now]
-@
+@interval_type = opts[:interval_type] || DEFAULT_INTERVAL_TYPE
+@task = Concurrent::SafeTaskExecutor.new(task)
+@executor = opts[:executor] || Concurrent.global_io_executor
 @running = Concurrent::AtomicBoolean.new(false)
 @value = nil

@@ -289,17 +328,18 @@ module Concurrent

 # @!visibility private
 def schedule_next_task(interval = execution_interval)
-ScheduledTask.execute(interval, args: [Concurrent::Event.new], &method(:execute_task))
+ScheduledTask.execute(interval, executor: @executor, args: [Concurrent::Event.new], &method(:execute_task))
 nil
 end

 # @!visibility private
 def execute_task(completion)
 return nil unless @running.true?
-
+start_time = Concurrent.monotonic_time
+_success, value, reason = @task.execute(self)
 if completion.try?
 self.value = value
-schedule_next_task
+schedule_next_task(calculate_next_interval(start_time))
 time = Time.now
 observers.notify_observers do
 [time, self.value, reason]
@@ -307,5 +347,15 @@ module Concurrent
 end
 nil
 end
+
+# @!visibility private
+def calculate_next_interval(start_time)
+if @interval_type == FIXED_RATE
+run_time = Concurrent.monotonic_time - start_time
+[execution_interval - run_time, 0].max
+else # FIXED_DELAY
+execution_interval
+end
+end
 end
 end
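The `calculate_next_interval` arithmetic matches the fixed-rate description in the class documentation; a worked example using the same numbers:

    execution_interval = 60
    [10, 70].each do |run_time|
      next_interval = [execution_interval - run_time, 0].max
      puts "task ran #{run_time}s -> next run in #{next_interval}s"
    end
    # task ran 10s -> next run in 50s
    # task ran 70s -> next run in 0s (i.e. immediately)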

data/lib/concurrent-ruby/concurrent/utility/processor_counter.rb CHANGED
@@ -11,6 +11,8 @@ module Concurrent
 def initialize
 @processor_count = Delay.new { compute_processor_count }
 @physical_processor_count = Delay.new { compute_physical_processor_count }
+@cpu_quota = Delay.new { compute_cpu_quota }
+@cpu_shares = Delay.new { compute_cpu_shares }
 end

 def processor_count
@@ -21,6 +23,29 @@ module Concurrent
 @physical_processor_count.value
 end

+def available_processor_count
+cpu_count = processor_count.to_f
+quota = cpu_quota
+
+return cpu_count if quota.nil?
+
+# cgroup cpus quotas have no limits, so they can be set to higher than the
+# real count of cores.
+if quota > cpu_count
+cpu_count
+else
+quota
+end
+end
+
+def cpu_quota
+@cpu_quota.value
+end
+
+def cpu_shares
+@cpu_shares.value
+end
+
 private

 def compute_processor_count
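The branch above simply caps the quota at the real core count, since (as the comment notes) a cgroup quota can be configured higher than the hardware provides. With illustrative numbers:

    cpu_count = 8.0    # processor_count.to_f on an 8-core host (example value)
    quota     = 16.0   # cgroup quota set higher than the hardware (example value)
    quota > cpu_count ? cpu_count : quota   # => 8.0, what available_processor_count returns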
@@ -48,10 +73,20 @@ module Concurrent
 end
 cores.count
 when /mswin|mingw/
-
-
-
-
+# Get-CimInstance introduced in PowerShell 3 or earlier: https://learn.microsoft.com/en-us/previous-versions/powershell/module/cimcmdlets/get-ciminstance?view=powershell-3.0
+result = run('powershell -command "Get-CimInstance -ClassName Win32_Processor -Property NumberOfCores | Select-Object -Property NumberOfCores"')
+if !result || $?.exitstatus != 0
+# fallback to deprecated wmic for older systems
+result = run("wmic cpu get NumberOfCores")
+end
+if !result || $?.exitstatus != 0
+# Bail out if both commands returned something unexpected
+processor_count
+else
+# powershell: "\nNumberOfCores\n-------------\n 4\n\n\n"
+# wmic: "NumberOfCores \n\n4 \n\n\n\n"
+result.scan(/\d+/).map(&:to_i).reduce(:+)
+end
 else
 processor_count
 end
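Both command outputs quoted in the comments are parsed by the same scan-and-sum expression, which also adds up the per-processor rows that wmic prints on multi-socket machines. Checking it against the sample strings from the comments:

    powershell_out = "\nNumberOfCores\n-------------\n4\n\n\n"
    wmic_out       = "NumberOfCores  \n\n4  \n\n\n\n"
    powershell_out.scan(/\d+/).map(&:to_i).reduce(:+)   # => 4
    wmic_out.scan(/\d+/).map(&:to_i).reduce(:+)         # => 4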
@@ -60,6 +95,45 @@ module Concurrent
 rescue
 return 1
 end
+
+def run(command)
+IO.popen(command, &:read)
+rescue Errno::ENOENT
+end
+
+def compute_cpu_quota
+if RbConfig::CONFIG["target_os"].include?("linux")
+if File.exist?("/sys/fs/cgroup/cpu.max")
+# cgroups v2: https://docs.kernel.org/admin-guide/cgroup-v2.html#cpu-interface-files
+cpu_max = File.read("/sys/fs/cgroup/cpu.max")
+return nil if cpu_max.start_with?("max ") # no limit
+max, period = cpu_max.split.map(&:to_f)
+max / period
+elsif File.exist?("/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us")
+# cgroups v1: https://kernel.googlesource.com/pub/scm/linux/kernel/git/glommer/memcg/+/cpu_stat/Documentation/cgroups/cpu.txt
+max = File.read("/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us").to_i
+# If the cpu.cfs_quota_us is -1, cgroup does not adhere to any CPU time restrictions
+# https://docs.kernel.org/scheduler/sched-bwc.html#management
+return nil if max <= 0
+period = File.read("/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us").to_f
+max / period
+end
+end
+end
+
+def compute_cpu_shares
+if RbConfig::CONFIG["target_os"].include?("linux")
+if File.exist?("/sys/fs/cgroup/cpu.weight")
+# cgroups v2: https://docs.kernel.org/admin-guide/cgroup-v2.html#cpu-interface-files
+# Ref: https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2254-cgroup-v2#phase-1-convert-from-cgroups-v1-settings-to-v2
+weight = File.read("/sys/fs/cgroup/cpu.weight").to_f
+((((weight - 1) * 262142) / 9999) + 2) / 1024
+elsif File.exist?("/sys/fs/cgroup/cpu/cpu.shares")
+# cgroups v1: https://kernel.googlesource.com/pub/scm/linux/kernel/git/glommer/memcg/+/cpu_stat/Documentation/cgroups/cpu.txt
+File.read("/sys/fs/cgroup/cpu/cpu.shares").to_f / 1024
+end
+end
+end
 end
 end

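A quick illustration of the cgroup arithmetic in `compute_cpu_quota` and `compute_cpu_shares`, using example file contents rather than real reads:

    # cgroup v2: cpu.max holds "<max> <period>" in microseconds ("max" means unlimited).
    cpu_max = "200000 100000\n"              # example contents of /sys/fs/cgroup/cpu.max
    max, period = cpu_max.split.map(&:to_f)
    max / period                             # => 2.0 CPUs worth of quota

    # cgroup v1: quota / period, with any non-positive quota treated as unlimited.
    150_000.0 / 100_000                      # => 1.5 CPUs worth of quota

    # cgroup v1 shares are relative to 1024, so 2048 requests two CPUs' worth.
    2048.0 / 1024                            # => 2.0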
@@ -75,8 +149,8 @@ module Concurrent
 # `java.lang.Runtime.getRuntime.availableProcessors` will be used. According
 # to the Java documentation this "value may change during a particular
 # invocation of the virtual machine... [applications] should therefore
-# occasionally poll this property."
-#
+# occasionally poll this property." We still memoize this value once under
+# JRuby.
 #
 # Otherwise Ruby's Etc.nprocessors will be used.
 #
@@ -107,4 +181,40 @@ module Concurrent
 def self.physical_processor_count
 processor_counter.physical_processor_count
 end
+
+# Number of processors cores available for process scheduling.
+# This method takes in account the CPU quota if the process is inside a cgroup with a
+# dedicated CPU quota (typically Docker).
+# Otherwise it returns the same value as #processor_count but as a Float.
+#
+# For performance reasons the calculated value will be memoized on the first
+# call.
+#
+# @return [Float] number of available processors
+def self.available_processor_count
+processor_counter.available_processor_count
+end
+
+# The maximum number of processors cores available for process scheduling.
+# Returns `nil` if there is no enforced limit, or a `Float` if the
+# process is inside a cgroup with a dedicated CPU quota (typically Docker).
+#
+# Note that nothing prevents setting a CPU quota higher than the actual number of
+# cores on the system.
+#
+# For performance reasons the calculated value will be memoized on the first
+# call.
+#
+# @return [nil, Float] Maximum number of available processors as set by a cgroup CPU quota, or nil if none set
+def self.cpu_quota
+processor_counter.cpu_quota
+end
+
+# The CPU shares requested by the process. For performance reasons the calculated
+# value will be memoized on the first call.
+#
+# @return [Float, nil] CPU shares requested by the process, or nil if not set
+def self.cpu_shares
+processor_counter.cpu_shares
+end
 end
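Taken together, the new public helpers can be used like this (values illustrative; `cpu_quota` and `cpu_shares` are `nil` outside a constrained cgroup):

    require 'concurrent'

    Concurrent.processor_count            # => 8 (example: logical cores visible to the OS)
    Concurrent.physical_processor_count   # => 4
    Concurrent.available_processor_count  # => 2.0 (example: container limited to 2 CPUs)
    Concurrent.cpu_quota                  # => 2.0, or nil when no cgroup limit applies
    Concurrent.cpu_shares                 # => 1.0, or nil when not set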

metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: concurrent-ruby
 version: !ruby/object:Gem::Version
-version: 1.2.2
+version: 1.3.4
 platform: ruby
 authors:
 - Jerry D'Antonio
@@ -10,7 +10,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2024-08-10 00:00:00.000000000 Z
 dependencies: []
 description: |
 Modern concurrency tools including agents, futures, promises, thread pools, actors, supervisors, and more.
@@ -76,7 +76,6 @@ files:
 - lib/concurrent-ruby/concurrent/collection/copy_on_write_observer_set.rb
 - lib/concurrent-ruby/concurrent/collection/java_non_concurrent_priority_queue.rb
 - lib/concurrent-ruby/concurrent/collection/lock_free_stack.rb
-- lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb
 - lib/concurrent-ruby/concurrent/collection/map/mri_map_backend.rb
 - lib/concurrent-ruby/concurrent/collection/map/non_concurrent_map_backend.rb
 - lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb
@@ -147,7 +146,6 @@ files:
 - lib/concurrent-ruby/concurrent/thread_safe/synchronized_delegator.rb
 - lib/concurrent-ruby/concurrent/thread_safe/util.rb
 - lib/concurrent-ruby/concurrent/thread_safe/util/adder.rb
-- lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb
 - lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb
 - lib/concurrent-ruby/concurrent/thread_safe/util/power_of_two_tuple.rb
 - lib/concurrent-ruby/concurrent/thread_safe/util/striped64.rb