work_shaper 0.1.0
- checksums.yaml +7 -0
- data/CHANGELOG.md +5 -0
- data/Gemfile +12 -0
- data/LICENSE +21 -0
- data/LICENSE.txt +21 -0
- data/README.md +92 -0
- data/Rakefile +12 -0
- data/lib/work_shaper/manager.rb +182 -0
- data/lib/work_shaper/version.rb +5 -0
- data/lib/work_shaper/worker.rb +54 -0
- data/lib/work_shaper.rb +29 -0
- data/work_shaper.gemspec +39 -0
- metadata +73 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA256:
  metadata.gz: 5f008d410f5dc96fc854d8673b38873eacb66d176768d8c8ab8b78107a4b4837
  data.tar.gz: 621935ffcad5cd2db1b138a2fafeab86eeb5c86138437756a56768cae698a1ff
SHA512:
  metadata.gz: 2119feac5a3f1d5e56d69b572a8b764558625d68da7c63d7b7fa4e2747e92a204013fea4247cd5502317fed446ebb7f8826b130f9f15437895de2547a4c927d2
  data.tar.gz: 591f2a06d61f872e1d194b73dfac057f31b34f903512ba48b6864e9280d71b27ed83004e95274d4dd029ce978853f0dd81b126c5b5eb8e28014a3e9c169f89be
data/CHANGELOG.md
ADDED
data/Gemfile
ADDED
data/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2023 Broadvoice

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
data/LICENSE.txt
ADDED
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2023 Jerry Fernholz

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
data/README.md
ADDED
@@ -0,0 +1,92 @@
# WorkShaper

WorkShaper is inspired by Kafka partitions and offsets, but could be used to organize and
parallelize other forms of work. The original goal was to parallelize processing offsets in
a given partition while maintaining order for a subset of the messages based on Sub Keys.

The key concepts include Sub Key, Partition, and Offset. Work on a given Sub Key must be
executed in the order in which it is enqueued. However, work on different Sub Keys can run
in parallel. All Work (offsets) on a given Partition must be Acknowledged in continuous
monotonically increasing order. If a higher offset's work is completed before a lower offset,
the Manager will hold the acknowledgement until all lower offsets are acknowledged. Remember,
work (offsets) for a given sub key are still processed in order.
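
To make that concrete, here is a minimal sketch of the acknowledgement rule using the `Manager` API from `lib/work_shaper/manager.rb`. It is illustrative only and assumes the gem plus the `sorted_set` and `concurrent-ruby` gems its code relies on are installed; the sub keys, sleeps, and messages are arbitrary and exist only to force out-of-order completion.

```ruby
require 'logger'
require 'sorted_set'  # provides SortedSet, used by Manager and Worker
require 'concurrent'  # provides Concurrent::FixedThreadPool, used by Worker
require 'work_shaper'

WorkShaper.logger = Logger.new($stdout)

acked = []
manager = WorkShaper::Manager.new(
  work: ->(_m, _p, offset) { sleep(offset.zero? ? 0.05 : 0) }, # offset 0 finishes last
  on_done: ->(_m, _p, _o) {},
  ack: ->(partition, offset) { acked << [partition, offset] },
  on_error: ->(e, _m, _p, _o) { raise e }
)

# Offsets 1 and 2 (sub key :b) complete before offset 0 (sub key :a),
# but the Manager holds their acknowledgement until offset 0 is done.
manager.enqueue(:a, 'message 0', 0, 0)
manager.enqueue(:b, 'message 1', 0, 1)
manager.enqueue(:b, 'message 2', 0, 2)

manager.flush
manager.shutdown
acked # => [[0, 0], [0, 1], [0, 2]], acked in offset order, not completion order
```
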
## Installation

Install the gem and add to the application's Gemfile by executing:

    $ bundle add work_shaper

If bundler is not being used to manage dependencies, install the gem by executing:

    $ gem install work_shaper

## Usage

### Example

```ruby
consumer = MyRdKafka.consumer(...)

# Called for each message
work = ->(message, _p, _o) do
  MsgProcessor.process(message)
end

# Called each time `work` completes
done = ->(_m, _p, _o) {}

# Called periodically after work is complete to acknowledge the
# completed work. Completed offsets are queued and processed every
# 5 ms by the OffsetManager.
ack = ->(p, o) do
  consumer.store_offset(ENV.fetch('TOPIC_NAME'), p, o)
rescue InvalidOffset => e
  # On rebalance, RdKafka sets the offset to _INVALID for the consumer
  # losing that offset. In this scenario InvalidOffset is expected
  # and we should move on.
  # TODO: This can probably be more elegantly handled.
end

# Called if an exception is encountered in `work` or `done`. It is
# important to understand `work` is called in a sub thread, so the
# exception will not bubble up.
error = ->(e, m, p, o) do
  logger.error "#{e} on #{p} #{o}"
  @fatal_error = e
end

max_in_queue = ENV.fetch('MAX_THREAD_QUEUE_SIZE', 25).to_i

# Manager takes keyword arguments (see WorkShaper::Manager#initialize).
work_shaper = WorkShaper::Manager.new(
  work: work, on_done: done, ack: ack, on_error: error, max_in_queue: max_in_queue
)

@value_to_subkey = {}
max_sub_keys = ENV.fetch('MAX_SUB_KEYS', 100).to_i
consumer.each_message do |message|
  break if @fatal_error

  sub_key = @value_to_subkey[message.payload['some attribute']] ||=
    MurmurHash3::V32.str_hash(message.payload['some attribute']) % max_sub_keys

  work_shaper.enqueue(
    sub_key,
    message,
    message.partition,
    message.offset
  )
end
```
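
The example above never winds the `Manager` down. Based on the `flush` and `shutdown` methods in `lib/work_shaper/manager.rb`, a wind-down after the consume loop might look like this sketch (illustrative, not prescribed by the gem):

```ruby
# After the consume loop exits (for example because @fatal_error was set),
# acknowledge the lowest continuous run of completed offsets, then stop the
# heartbeat, offset manager, and worker threads.
work_shaper.flush
work_shaper.shutdown
raise @fatal_error if @fatal_error
```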

## Development

## Contributing

Bug reports and pull requests are welcome on GitHub at https://github.com/broadvoice/work-shaper.

## License

The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
data/Rakefile
ADDED
data/lib/work_shaper/manager.rb
ADDED
@@ -0,0 +1,182 @@
module WorkShaper
  # The Manager is responsible for organizing the work to be done, triggering calls to acknowledge work done
  # for each offset in monotonically increasing order (independent of the execution order), and gracefully
  # cleaning up when `#shutdown` is called.
  class Manager
    # Several of the parameters here are Lambdas (not Proc). Note you can pass a method using
    # `method(:some_method)` or a lambda directly `->{ puts 'Hello'}`.
    #
    # @param work [#call(message, partition, offset)] Lambda that we will call to execute work.
    # @param on_done [#call(message, partition, offset)] Lambda that we call when work is done.
    # @param ack [#call(partition, offset)] Lambda we will call when it is safe to commit an offset. This is not the
    #   same as Done.
    # @param on_error [#call(exception, message, partition, offset)] Lambda that we call if an
    #   error is encountered.
    # @param max_in_queue [Integer] The maximum in flight jobs per Sub Key. This affects how many
    #   messages could get replayed if your process crashes before the offsets are committed.
    def initialize(work:, on_done:, ack:, on_error:, max_in_queue: 3,
                   heartbeat_period_sec: 60, offset_commit_period_ms: 5)
      @work = work
      @on_done = on_done
      @ack = ack
      @on_error = on_error
      @workers = {}
      @last_ack = {}
      @received_offsets = {}
      @completed_offsets = {}
      @max_in_queue = max_in_queue
      @semaphore = Mutex.new
      @shutdown = false

      @total_enqueued = 0

      @heartbeat = Thread.new do
        while true
          report
          sleep heartbeat_period_sec
        end
      rescue => e
        WorkShaper.logger.warn({ message: 'Shutdown from Heartbeat', error: e })
        shutdown
      end

      @offset_manager = Thread.new do
        while true
          @completed_offsets.each_key do |partition|
            offset_ack(partition)
          end
          sleep offset_commit_period_ms / 1000.0
        end
      rescue => e
        WorkShaper.logger.warn({ message: 'Shutdown from Offset Manager', error: e })
        shutdown
      end
    end

    # Enqueue a message to be worked on the given `sub_key`, `partition`, and `offset`.
    def enqueue(sub_key, message, partition, offset)
      raise StandardError, 'Shutting down' if @shutdown
      pause_on_overrun

      worker = nil
      @semaphore.synchronize do
        @total_enqueued += 1
        (@received_offsets[partition] ||= SortedSet.new) << offset

        worker =
          @workers[sub_key] ||=
            Worker.new(
              @work,
              @on_done,
              method(:offset_ack),
              @on_error,
              @last_ack,
              @completed_offsets,
              @semaphore,
              @max_in_queue
            )
      end

      worker.enqueue(message, partition, offset)
    end

    # Flush any offsets for which work has been completed. Only the lowest continuous run of
    # offsets will be acknowledged. Any offset after a discontinuity will be replayed when
    # the consumer restarts.
    def flush(safe: true)
      sleep 5
      @completed_offsets.each_key do |k|
        safe ? offset_ack(k) : offset_ack_unsafe(k)
      end
    end

    # Output state of Last Acked and Pending Offset Acks.
    def report(detailed: false)
      @semaphore.synchronize do
        WorkShaper.logger.info(
          { message: 'Reporting', total_enqueued: @total_enqueued,
            total_acked: @total_acked,
            in_flight: (@total_enqueued.to_i - @total_acked.to_i),
            last_acked_offsets: @last_ack,
            worker_count: @workers.keys.count
          })
        if detailed
          WorkShaper.logger.info(
            {
              message: 'Reporting - Extra Detail',
              pending_ack: @completed_offsets,
              received_offsets: @received_offsets
            })
        end
      end
    end

    # Stop the underlying threads
    def shutdown
      @shutdown = true
      report(detailed: true)
      Thread.kill(@heartbeat)
      Thread.kill(@offset_manager)
      @workers.each_value(&:shutdown)
    end

    private

    def offset_ack(partition)
      @semaphore.synchronize do
        offset_ack_unsafe(partition)
      end
    end

    def offset_ack_unsafe(partition)
      @total_acked ||= 0

      completed = @completed_offsets[partition]
      received = @received_offsets[partition]

      offset = completed.first
      while received.any? && received.first == offset
        # We observed Kafka sending the same message twice, even after
        # having committed the offset. Here we skip this offset if we
        # know it has already been committed.
        last_offset = @last_ack[partition]
        if last_offset && offset <= last_offset
          WorkShaper.logger.warn(
            { message: 'Received Duplicate Offset',
              offset: "#{partition}:#{offset}"
            })
        else
          result = @ack.call(partition, offset)
          if result.is_a? Exception
            WorkShaper.logger.warn(
              { message: 'Failed to Ack Offset, likely re-balance',
                offset: "#{partition}:#{offset}",
                completed: @completed_offsets[partition].to_a[0..10].join(','),
                received: @received_offsets[partition].to_a[0..10].join(',')
              })
          else
            @total_acked += 1
            @last_ack[partition] = offset
          end
        end

        completed.delete(offset)
        received.delete(offset)

        offset = completed.first
      end
    end

    def pause_on_overrun
      overrun = lambda do
        @total_enqueued.to_i - @total_acked.to_i > @max_in_queue
      end

      # We have to be careful here to avoid a deadlock. Another thread may be waiting
      # for the mutex to ack and remove offsets. If we wrap enqueue in a synchronize
      # block, that would lead to a deadlock. Here the sleep allows other threads
      # to wrap up.
      sleep 0.005 while @semaphore.synchronize { overrun.call }
    end
  end
end
data/lib/work_shaper/worker.rb
ADDED
@@ -0,0 +1,54 @@
module WorkShaper
  # Worker runs `work` for a single Sub Key on its own single-threaded pool,
  # preserving the order in which messages were enqueued.
  class Worker
    # rubocop:disable Metrics/ParameterLists
    # rubocop:disable Layout/LineLength
    # @param work [Lambda] Lambda that we will #call(message) to execute work.
    # @param on_done [Lambda] Lambda that we #call(partition, offset) when work is done.
    # @param on_error [Lambda] Lambda that we #call(exception) if an error is encountered.
    def initialize(work, on_done, ack_handler, on_error, last_ack, offset_stack, semaphore, max_in_queue)
      @jobs = []
      @work = work
      @on_done = on_done
      @ack_handler = ack_handler
      @on_error = on_error
      @last_ack = last_ack
      @completed_offsets = offset_stack
      @semaphore = semaphore
      @max_in_queue = max_in_queue
      @thread_pool = Concurrent::FixedThreadPool.new(1, auto_terminate: false)
    end

    # rubocop:enable Metrics/ParameterLists
    # rubocop:enable Layout/LineLength

    def enqueue(message, partition, offset)
      # rubocop:disable Style/RescueStandardError
      @thread_pool.post do
        @work.call(message, partition, offset)
        @on_done.call(message, partition, offset)
        @semaphore.synchronize do
          (@completed_offsets[partition] ||= SortedSet.new) << offset
        end
        # @ack_handler.call(partition, offset)
      rescue => e
        puts("Error processing #{partition}:#{offset} #{e}")
        puts(e.backtrace.join("\n"))
        # logger.error("Acking it anyways, why not?")
        @on_error.call(e, message, partition, offset)
        # @ack_handler.call(partition, offset)
      end
      # rubocop:enable Style/RescueStandardError
    end

    def shutdown
      # Cannot call logger from trap{}
      WorkShaper.logger.info({message: 'Shutting down worker'})
      @thread_pool.shutdown
      @thread_pool.wait_for_termination
      sleep 0.05 while @thread_pool.queue_length.positive?
    end

    private
  end
end
data/lib/work_shaper.rb
ADDED
@@ -0,0 +1,29 @@
# frozen_string_literal: true

require_relative "work_shaper/version"
require_relative "work_shaper/manager"
require_relative "work_shaper/worker"

# WorkShaper is inspired by Kafka partitions and offsets, but could be used to organize and
# parallelize other forms of work. The original goal was to parallelize processing offsets in
# a given partition while maintaining order for a subset of the messages based on Sub Keys.
#
# The key concepts include Sub Key, Partition, and Offset. Work on a given Sub Key must be
# executed in the order in which it is enqueued. However, work on different Sub Keys can run
# in parallel. All Work (offset) on a given Partition must be Acknowledged in continuous
# monotonically increasing order. If a higher offset's work is completed before a lower offset,
# the Manager will hold the acknowledgement until all lower offsets are acknowledged. Remember,
# work (offsets) for a given sub key are still processed in order.
module WorkShaper
  def self.logger=(logger)
    @logger = logger
  end
  def self.logger
    @logger ||= Logger.new(
      $stdout,
      level: ENV['LOG_LEVEL'] || 'DEBUG',
      formatter: Ruby::JSONFormatter::Base.new
    )
  end
end
data/work_shaper.gemspec
ADDED
@@ -0,0 +1,39 @@
# frozen_string_literal: true

require_relative "lib/work_shaper/version"

Gem::Specification.new do |spec|
  spec.name = "work_shaper"
  spec.version = WorkShaper::VERSION
  spec.authors = ["Jerry Fernholz"]
  spec.email = ["jerryf@broadvoice.com"]

  spec.summary = "Parallelize work across many threads."
  spec.description = "WorkShaper was built to parallelize the work needed to process Kafka messages."
  spec.homepage = "https://github.com/broadvoice/work-shaper"
  spec.license = "MIT"
  spec.required_ruby_version = ">= 2.6.0"

  spec.metadata["allowed_push_host"] = "https://rubygems.org"

  spec.metadata["homepage_uri"] = spec.homepage
  spec.metadata["source_code_uri"] = "https://github.com/broadvoice/work-shaper"
  spec.metadata["changelog_uri"] = "https://github.com/broadvoice/work-shaper/blob/main/CHANGELOG.md"

  # Specify which files should be added to the gem when it is released.
  # The `git ls-files -z` loads the files in the RubyGem that have been added into git.
  spec.files = Dir.chdir(__dir__) do
    `git ls-files -z`.split("\x0").reject do |f|
      (File.expand_path(f) == __FILE__) || f.start_with?(*%w[bin/ test/ spec/ features/ .git .circleci appveyor])
    end
  end
  spec.bindir = "exe"
  spec.executables = spec.files.grep(%r{\Aexe/}) { |f| File.basename(f) }
  spec.require_paths = ["lib"]

  # Uncomment to register a new dependency of your gem
  spec.add_dependency "sorted_set", "~> 1.0"

  # For more information and examples about making a new gem, check out our
  # guide at: https://bundler.io/guides/creating_gem.html
end
metadata
ADDED
@@ -0,0 +1,73 @@
--- !ruby/object:Gem::Specification
name: work_shaper
version: !ruby/object:Gem::Version
  version: 0.1.0
platform: ruby
authors:
- Jerry Fernholz
autorequire:
bindir: exe
cert_chain: []
date: 2024-01-02 00:00:00.000000000 Z
dependencies:
- !ruby/object:Gem::Dependency
  name: sorted_set
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.0'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.0'
description: WorkShaper was built to parallelize the work needed to process Kafka
  messages.
email:
- jerryf@broadvoice.com
executables: []
extensions: []
extra_rdoc_files: []
files:
- CHANGELOG.md
- Gemfile
- LICENSE
- LICENSE.txt
- README.md
- Rakefile
- lib/work_shaper.rb
- lib/work_shaper/manager.rb
- lib/work_shaper/version.rb
- lib/work_shaper/worker.rb
- work_shaper.gemspec
homepage: https://github.com/broadvoice/work-shaper
licenses:
- MIT
metadata:
  allowed_push_host: https://rubygems.org
  homepage_uri: https://github.com/broadvoice/work-shaper
  source_code_uri: https://github.com/broadvoice/work-shaper
  changelog_uri: https://github.com/broadvoice/work-shaper/blob/main/CHANGELOG.md
post_install_message:
rdoc_options: []
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: 2.6.0
required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
requirements: []
rubygems_version: 3.4.10
signing_key:
specification_version: 4
summary: Parallelize work across many threads.
test_files: []