redstream 0.0.1
- checksums.yaml +7 -0
- data/.gitignore +14 -0
- data/.travis.yml +10 -0
- data/Gemfile +5 -0
- data/LICENSE.txt +22 -0
- data/README.md +253 -0
- data/Rakefile +9 -0
- data/docker-compose.yml +6 -0
- data/lib/redstream.rb +134 -0
- data/lib/redstream/consumer.rb +115 -0
- data/lib/redstream/delayer.rb +100 -0
- data/lib/redstream/lock.rb +80 -0
- data/lib/redstream/message.rb +52 -0
- data/lib/redstream/model.rb +57 -0
- data/lib/redstream/producer.rb +145 -0
- data/lib/redstream/trimmer.rb +91 -0
- data/lib/redstream/version.rb +5 -0
- data/redstream.gemspec +38 -0
- data/spec/redstream/consumer_spec.rb +90 -0
- data/spec/redstream/delayer_spec.rb +53 -0
- data/spec/redstream/lock_spec.rb +68 -0
- data/spec/redstream/model_spec.rb +57 -0
- data/spec/redstream/producer_spec.rb +79 -0
- data/spec/redstream/trimmer_spec.rb +32 -0
- data/spec/redstream_spec.rb +117 -0
- data/spec/spec_helper.rb +66 -0
- metadata +289 -0
data/lib/redstream/trimmer.rb
ADDED
@@ -0,0 +1,91 @@
+
+module Redstream
+  # The Redstream::Trimmer class is necessary to clean up messages after all
+  # consumers have successfully processed and committed them. Otherwise they
+  # would fill up redis and finally bring redis down due to out of memory
+  # issues. The Trimmer will sleep for the specified interval in case there is
+  # nothing to trim. Please note that you must pass an array containing all
+  # consumer names reading from the stream which is about to be trimmed.
+  # Otherwise the Trimmer could trim messages from the stream before all
+  # consumers received the respective messages.
+  #
+  # @example
+  #   trimmer = Redstream::Trimmer.new(
+  #     interval: 30,
+  #     stream_name: "users",
+  #     consumer_names: ["indexer", "cacher"]
+  #   )
+  #
+  #   trimmer.run
+
+  class Trimmer
+    # Initializes a new trimmer. Accepts an interval to sleep for in case there
+    # is nothing to trim, the actual stream name, the consumer names as well as
+    # a logger for debug log messages.
+    #
+    # @param interval [Fixnum, Float] Specifies a time to sleep in case there is
+    #   nothing to trim.
+    # @param stream_name [String] The name of the stream that should be trimmed.
+    #   Please note that redstream adds a prefix to the redis keys. However,
+    #   the stream_name param must be specified without any prefixes here. When
+    #   using Redstream::Model, the stream name is the downcased, pluralized
+    #   and underscored version of the model name. I.e., the stream name for a
+    #   'User' model will be 'users'.
+    # @param consumer_names [Array] The list of all consumers reading from the
+    #   specified stream
+    # @param logger [Logger] A logger used for debug messages
+
+    def initialize(interval:, stream_name:, consumer_names:, logger: Logger.new("/dev/null"))
+      @interval = interval
+      @stream_name = stream_name
+      @consumer_names = consumer_names
+      @logger = logger
+      @lock = Lock.new(name: "trimmer:#{stream_name}")
+    end
+
+    # Loops and blocks forever trimming messages from the specified redis
+    # stream.
+
+    def run
+      loop { run_once }
+    end
+
+    # Runs the trimming a single time. You usually want to use the #run method
+    # instead, which loops/blocks forever.
+
+    def run_once
+      got_lock = @lock.acquire do
+        min_committed_id = Redstream.connection_pool.with do |redis|
+          offset_key_names = @consumer_names.map do |consumer_name|
+            Redstream.offset_key_name(stream_name: @stream_name, consumer_name: consumer_name)
+          end
+
+          redis.mget(offset_key_names).map(&:to_s).reject(&:empty?).min
+        end
+
+        return sleep(@interval) unless min_committed_id
+
+        loop do
+          messages = Redstream.connection_pool.with do |redis|
+            redis.xrange(Redstream.stream_key_name(@stream_name), "-", min_committed_id, count: 1_000)
+          end
+
+          return sleep(@interval) if messages.nil? || messages.empty?
+
+          Redstream.connection_pool.with { |redis| redis.xdel Redstream.stream_key_name(@stream_name), messages.map(&:first) }
+
+          @logger.debug "Trimmed #{messages.size} messages from #{@stream_name}"
+        end
+      end
+
+      sleep(5) unless got_lock
+    rescue => e
+      @logger.error e
+
+      sleep 5
+
+      retry
+    end
+  end
+end
+
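Beyond the inline @example above, a common deployment pattern is to keep the blocking #run loop off the main thread. A minimal sketch, assuming a "users" stream read by consumers named "indexer" and "cacher" (all names here are illustrative, mirroring the doc comment):

  trimmer = Redstream::Trimmer.new(
    interval: 30,                          # seconds to sleep when there is nothing to trim
    stream_name: "users",                  # un-prefixed stream name, as documented above
    consumer_names: ["indexer", "cacher"]  # every consumer reading the stream must be listed
  )

  Thread.new { trimmer.run }               # loops forever; trimmed entries are XDEL'd in batches of 1,000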
data/redstream.gemspec
ADDED
@@ -0,0 +1,38 @@
+# coding: utf-8
+lib = File.expand_path('../lib', __FILE__)
+$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
+require 'redstream/version'
+
+Gem::Specification.new do |spec|
+  spec.name = "redstream"
+  spec.version = Redstream::VERSION
+  spec.authors = ["Benjamin Vetter"]
+  spec.email = ["vetter@plainpicture.de"]
+  spec.summary = %q{Using redis streams to keep your primary database in sync with secondary datastores}
+  spec.description = %q{Using redis streams to keep your primary database in sync with secondary datastores}
+  spec.homepage = "https://github.com/mrkamel/redstream"
+  spec.license = "MIT"
+
+  spec.files = `git ls-files -z`.split("\x0")
+  spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) }
+  spec.test_files = spec.files.grep(%r{^(test|spec|features)/})
+  spec.require_paths = ["lib"]
+
+  spec.add_development_dependency "bundler"
+  spec.add_development_dependency "rake"
+  spec.add_development_dependency "rspec"
+  spec.add_development_dependency "activerecord"
+  spec.add_development_dependency "database_cleaner"
+  spec.add_development_dependency "sqlite3", "1.3.13"
+  spec.add_development_dependency "factory_bot"
+  spec.add_development_dependency "timecop"
+  spec.add_development_dependency "concurrent-ruby"
+  spec.add_development_dependency "rspec-instafail"
+  spec.add_development_dependency "mocha"
+
+  spec.add_dependency "connection_pool"
+  spec.add_dependency "activesupport"
+  spec.add_dependency "redis", ">= 4.1.0"
+  spec.add_dependency "json"
+end
+
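With the gemspec above, pulling the gem into an application is the usual Bundler flow; a minimal sketch (the Gemfile snippet is illustrative, not taken from the gem's docs):

  # Gemfile
  gem "redstream"   # brings in connection_pool, activesupport, redis (>= 4.1.0) and json as runtime deps

  # then run: bundle install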
data/spec/redstream/consumer_spec.rb
ADDED
@@ -0,0 +1,90 @@
+
+require File.expand_path("../spec_helper", __dir__)
+
+RSpec.describe Redstream::Consumer do
+  describe "#run_once" do
+    it "doesn't call the block without messages" do
+      called = false
+
+      Redstream::Consumer.new(name: "consumer", stream_name: "products", batch_size: 5).run_once do |batch|
+        called = true
+      end
+
+      expect(called).to eq(false)
+    end
+
+    it "is mutually exclusive" do
+      create :product
+
+      calls = Concurrent::AtomicFixnum.new(0)
+
+      threads = Array.new(2) do |i|
+        Thread.new do
+          Redstream::Consumer.new(name: "consumer", stream_name: "products", batch_size: 5).run_once do |batch|
+            calls.increment
+
+            sleep 1
+          end
+        end
+      end
+
+      threads.each(&:join)
+
+      expect(calls.value).to eq(1)
+    end
+
+    it "is using the existing offset" do
+      create_list(:product, 2)
+
+      all_messages = redis.xrange(Redstream.stream_key_name("products"), "-", "+")
+
+      expect(all_messages.size).to eq(2)
+
+      redis.set(Redstream.offset_key_name(stream_name: "products", consumer_name: "consumer"), all_messages.first[0])
+
+      messages = nil
+
+      consumer = Redstream::Consumer.new(name: "consumer", stream_name: "products")
+
+      consumer.run_once do |batch|
+        messages = batch
+      end
+
+      expect(messages.size).to eq(1)
+      expect(messages.first.raw_message).to eq(all_messages.last)
+    end
+
+    it "yields messages in batches" do
+      products = create_list(:product, 15)
+
+      consumer = Redstream::Consumer.new(name: "consumer", stream_name: "products", batch_size: 10)
+
+      messages = nil
+
+      consumer.run_once do |batch|
+        messages = batch
+      end
+
+      expect(messages.size).to eq(10)
+
+      consumer.run_once do |batch|
+        messages = batch
+      end
+
+      expect(messages.size).to eq(5)
+    end
+
+    it "updates the offset" do
+      create :product
+
+      expect(redis.get(Redstream.offset_key_name(stream_name: "products", consumer_name: "consumer"))).to be(nil)
+
+      all_messages = redis.xrange(Redstream.stream_key_name("products"), "-", "+")
+
+      Redstream::Consumer.new(name: "consumer", stream_name: "products").run_once {}
+
+      expect(redis.get(Redstream.offset_key_name(stream_name: "products", consumer_name: "consumer"))).to eq(all_messages.last[0])
+    end
+  end
+end
+
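The spec above exercises the consumer surface used below: #run_once yields batches of at most batch_size messages past the stored offset, advances the offset key afterwards, is mutually exclusive for identically named consumers, and each yielded message exposes the raw XRANGE entry via #raw_message. A minimal polling worker sketched from just those calls (consumer and stream names are illustrative):

  consumer = Redstream::Consumer.new(name: "indexer", stream_name: "products", batch_size: 10)

  loop do
    consumer.run_once do |messages|
      messages.each do |message|
        id, fields = message.raw_message   # [id, fields] pair exactly as returned by XRANGE
        # update the secondary datastore from fields["payload"] here
      end
    end                                    # the offset is committed once the block returns
  end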
data/spec/redstream/delayer_spec.rb
ADDED
@@ -0,0 +1,53 @@
+
+require File.expand_path("../spec_helper", __dir__)
+
+RSpec.describe Redstream::Delayer do
+  describe "#run_once" do
+    it "copies expired messages to their target streams" do
+      redis.xadd Redstream.stream_key_name("target.delay"), payload: JSON.dump(value: "message")
+
+      expect(redis.xlen(Redstream.stream_key_name("target"))).to eq(0)
+
+      Redstream::Delayer.new(stream_name: "target", delay: 0).run_once
+
+      expect(redis.xlen(Redstream.stream_key_name("target"))).to eq(1)
+      expect(redis.xrange(Redstream.stream_key_name("target")).last[1]).to eq("payload" => JSON.dump(value: "message"))
+    end
+
+    it "delivers and commits before falling asleep" do
+      redis.xadd Redstream.stream_key_name("target.delay"), payload: JSON.dump(value: "message")
+      sleep 3
+      redis.xadd Redstream.stream_key_name("target.delay"), payload: JSON.dump(value: "message")
+
+      thread = Thread.new do
+        Redstream::Delayer.new(stream_name: "target", delay: 1).run_once
+      end
+
+      sleep 1
+
+      expect(redis.xlen(Redstream.stream_key_name("target"))).to eq(1)
+      expect(redis.get(Redstream.offset_key_name(stream_name: "target.delay", consumer_name: "delayer"))).not_to be_nil
+
+      thread.join
+
+      expect(redis.xlen(Redstream.stream_key_name("target"))).to eq(2)
+    end
+
+    it "does not copy not yet expired messages" do
+      redis.xadd Redstream.stream_key_name("target.delay"), payload: JSON.dump(value: "message")
+
+      thread = Thread.new do
+        Redstream::Delayer.new(stream_name: "target", delay: 2).run_once
+      end
+
+      sleep 1
+
+      expect(redis.xlen(Redstream.stream_key_name("target"))).to eq(0)
+
+      thread.join
+
+      expect(redis.xlen(Redstream.stream_key_name("target"))).to eq(1)
+    end
+  end
+end
+
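As the spec shows, the delayer reads the "<stream>.delay" stream, waits until entries are at least delay seconds old, copies them onto the main stream and commits its offset under the "delayer" consumer name. A minimal sketch of running it continuously (stream name and delay value are illustrative):

  delayer = Redstream::Delayer.new(stream_name: "products", delay: 5 * 60)

  Thread.new do
    loop { delayer.run_once }   # each pass re-queues messages whose delay has expired
  end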
data/spec/redstream/lock_spec.rb
ADDED
@@ -0,0 +1,68 @@
+
+require File.expand_path("../spec_helper", __dir__)
+
+RSpec.describe Redstream::Lock do
+  describe "#acquire" do
+    it "gets a lock" do
+      lock_results = Concurrent::Array.new
+      calls = Concurrent::AtomicFixnum.new(0)
+
+      threads = Array.new(2) do |i|
+        Thread.new do
+          lock_results << Redstream::Lock.new(name: "lock").acquire do
+            calls.increment
+
+            sleep 1
+          end
+        end
+      end
+
+      threads.each(&:join)
+
+      expect(calls.value).to eq(1)
+      expect(lock_results.to_set).to eq([1, nil].to_set)
+    end
+
+    it "keeps the lock" do
+      threads = []
+      calls = Concurrent::Array.new
+
+      threads << Thread.new do
+        Redstream::Lock.new(name: "lock").acquire do
+          calls << "thread-1"
+
+          sleep 6
+        end
+      end
+
+      sleep 6
+
+      threads << Thread.new do
+        Redstream::Lock.new(name: "lock").acquire do
+          calls << "thread-2"
+        end
+      end
+
+      threads.each(&:join)
+
+      expect(calls).to eq(["thread-1"])
+    end
+
+    it "does not lock itself" do
+      lock = Redstream::Lock.new(name: "lock")
+
+      lock_results = []
+      calls = 0
+
+      2.times do
+        lock_results << lock.acquire do
+          calls += 1
+        end
+      end
+
+      expect(calls).to eq(2)
+      expect(lock_results).to eq([1, 1])
+    end
+  end
+end
+
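The lock spec pins down the contract the Trimmer relies on: #acquire runs the block only when the lock is free, returns a truthy value if the block ran and nil otherwise, keeps the lock while the block is running, and the same Lock instance can acquire again after its block finishes. A minimal sketch against that contract (the lock name is illustrative):

  lock = Redstream::Lock.new(name: "nightly-reindex")

  got_lock = lock.acquire do
    # critical section; at most one holder at a time across threads and processes
  end

  warn "another worker currently holds the lock" unless got_lock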
data/spec/redstream/model_spec.rb
ADDED
@@ -0,0 +1,57 @@
+
+require File.expand_path("../spec_helper", __dir__)
+
+RSpec.describe Redstream::Model do
+  it "adds a delay message after save" do
+    expect(redis.xlen(Redstream.stream_key_name("products.delay"))).to eq(0)
+
+    time = Time.now
+
+    product = Timecop.freeze(time) do
+      create(:product)
+    end
+
+    expect(redis.xlen(Redstream.stream_key_name("products.delay"))).to eq(1)
+    expect(redis.xrange(Redstream.stream_key_name("products.delay"), "-", "+").first[1]).to eq("payload" => JSON.dump(product.redstream_payload))
+  end
+
+  it "adds a delay message after touch" do
+    expect(redis.xlen(Redstream.stream_key_name("products.delay"))).to eq(0)
+
+    product = create(:product)
+
+    time = Time.now
+
+    Timecop.freeze(time) do
+      product.touch
+    end
+
+    expect(redis.xlen(Redstream.stream_key_name("products.delay"))).to eq(2)
+    expect(redis.xrange(Redstream.stream_key_name("products.delay"), "-", "+").last[1]).to eq("payload" => JSON.dump(product.redstream_payload))
+  end
+
+  it "adds a delay message after destroy" do
+    expect(redis.xlen(Redstream.stream_key_name("products.delay"))).to eq(0)
+
+    product = create(:product)
+
+    time = Time.now
+
+    Timecop.freeze(time) do
+      product.destroy
+    end
+
+    expect(redis.xlen(Redstream.stream_key_name("products.delay"))).to eq(2)
+    expect(redis.xrange(Redstream.stream_key_name("products.delay"), "-", "+").last[1]).to eq("payload" => JSON.dump(product.redstream_payload))
+  end
+
+  it "adds a queue message after commit" do
+    expect(redis.xlen(Redstream.stream_key_name("products"))).to eq(0)
+
+    product = create(:product)
+
+    expect(redis.xlen(Redstream.stream_key_name("products"))).to eq(1)
+    expect(redis.xrange(Redstream.stream_key_name("products"), "-", "+").first[1]).to eq("payload" => JSON.dump(product.redstream_payload))
+  end
+end
+
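The model spec only checks observable behaviour: saving, touching and destroying a Product writes entries to "products.delay" (and, after commit, to "products") whose payload is JSON.dump(record.redstream_payload). How a model opts in lives in lib/redstream/model.rb, which is not reproduced here, so the sketch below is hypothetical: the include mirrors the spec's subject, while the callback macro name and payload body are assumptions to be checked against the actual file:

  class Product < ActiveRecord::Base
    include Redstream::Model   # module under test in the spec above

    # redstream_callbacks      # hypothetical macro wiring save/touch/destroy; verify against lib/redstream/model.rb

    def redstream_payload      # called directly by the specs; the return value here is illustrative
      { id: id }
    end
  end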
data/spec/redstream/producer_spec.rb
ADDED
@@ -0,0 +1,79 @@
+
+require File.expand_path("../spec_helper", __dir__)
+
+RSpec.describe Redstream::Producer do
+  describe "#queue" do
+    it "adds a queue message for individual objects" do
+      product = create(:product)
+
+      stream_key_name = Redstream.stream_key_name("products")
+
+      expect { Redstream::Producer.new.queue(product) }.to change { redis.xlen(stream_key_name) }.by(1)
+      expect(redis.xrange(stream_key_name, "-", "+").last[1]).to eq("payload" => JSON.dump(product.redstream_payload))
+    end
+  end
+
+  describe "#delay" do
+    it "adds a delay message for individual objects" do
+      product = create(:product)
+
+      stream_key_name = Redstream.stream_key_name("products.delay")
+
+      expect { Redstream::Producer.new.delay(product) }.to change { redis.xlen(stream_key_name) }.by(1)
+      expect(redis.xrange(stream_key_name, "-", "+").last[1]).to eq("payload" => JSON.dump(product.redstream_payload))
+    end
+
+    it "respects wait" do
+      product = create(:product)
+
+      stream_key_name = Redstream.stream_key_name("products.delay")
+
+      expect { Redstream::Producer.new(wait: 0).delay(product) }.to change { redis.xlen(stream_key_name) }.by(1)
+    end
+  end
+
+  describe "#bulk_queue" do
+    it "adds bulk queue messages for scopes" do
+      products = create_list(:product, 2)
+
+      stream_key_name = Redstream.stream_key_name("products")
+
+      expect { Redstream::Producer.new.bulk_queue(Product.all) }.to change { redis.xlen(stream_key_name) }.by(2)
+
+      messages = redis.xrange(stream_key_name, "-", "+").last(2).map { |message| message[1] }
+
+      expect(messages).to eq([
+        { "payload" => JSON.dump(products[0].redstream_payload) },
+        { "payload" => JSON.dump(products[1].redstream_payload) }
+      ])
+    end
+  end
+
+  describe "#bulk_delay" do
+    it "adds bulk delay messages for scopes" do
+      products = create_list(:product, 2)
+
+      stream_key_name = Redstream.stream_key_name("products.delay")
+
+      expect { Redstream::Producer.new.bulk_delay(Product.all) }.to change { redis.xlen(stream_key_name) }.by(2)
+
+      messages = redis.xrange(stream_key_name, "-", "+").last(2).map { |message| message[1] }
+
+      expect(messages).to eq([
+        { "payload" => JSON.dump(products[0].redstream_payload) },
+        { "payload" => JSON.dump(products[1].redstream_payload) }
+      ])
+    end
+
+    it "should respect wait for delay" do
+      product = create(:product)
+
+      stream_key_name = Redstream.stream_key_name("products.delay")
+
+      products = create_list(:product, 2)
+
+      expect { Redstream::Producer.new(wait: 0).bulk_delay(products) }.to change { redis.xlen(stream_key_name) }.by(2)
+    end
+  end
+end
+
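Summing up the four write paths the producer spec covers, a brief sketch (Product is the same model the specs use; the wait: option appears in the spec, but its exact semantics are defined in lib/redstream/producer.rb):

  producer = Redstream::Producer.new   # or Redstream::Producer.new(wait: 0), as exercised by the spec

  product = Product.first              # any persisted record

  producer.queue(product)              # one message on the "products" stream
  producer.delay(product)              # one message on the "products.delay" stream
  producer.bulk_queue(Product.all)     # one queue message per record in the scope
  producer.bulk_delay(Product.all)     # one delay message per record in the scope (arrays work too)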