hivent 1.0.1
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.codeclimate.yml +19 -0
- data/.gitignore +14 -0
- data/.rspec +1 -0
- data/.rubocop.yml +1063 -0
- data/.ruby-version +1 -0
- data/.simplecov.template +1 -0
- data/.travis.yml +23 -0
- data/.version +1 -0
- data/Gemfile +4 -0
- data/LICENSE +21 -0
- data/README.md +196 -0
- data/bin/hivent +5 -0
- data/hivent.gemspec +34 -0
- data/lib/hivent.rb +32 -0
- data/lib/hivent/abstract_signal.rb +63 -0
- data/lib/hivent/cli/consumer.rb +60 -0
- data/lib/hivent/cli/runner.rb +50 -0
- data/lib/hivent/cli/start_option_parser.rb +53 -0
- data/lib/hivent/config.rb +22 -0
- data/lib/hivent/config/options.rb +51 -0
- data/lib/hivent/emitter.rb +41 -0
- data/lib/hivent/life_cycle_event_handler.rb +41 -0
- data/lib/hivent/redis/consumer.rb +82 -0
- data/lib/hivent/redis/extensions.rb +26 -0
- data/lib/hivent/redis/lua/consumer.lua +179 -0
- data/lib/hivent/redis/lua/producer.lua +27 -0
- data/lib/hivent/redis/producer.rb +24 -0
- data/lib/hivent/redis/redis.rb +14 -0
- data/lib/hivent/redis/signal.rb +36 -0
- data/lib/hivent/rspec.rb +11 -0
- data/lib/hivent/signal.rb +14 -0
- data/lib/hivent/spec.rb +11 -0
- data/lib/hivent/spec/matchers.rb +14 -0
- data/lib/hivent/spec/matchers/emit.rb +116 -0
- data/lib/hivent/spec/signal.rb +60 -0
- data/lib/hivent/version.rb +6 -0
- data/spec/codeclimate_helper.rb +5 -0
- data/spec/fixtures/cli/bootstrap_consumers.rb +7 -0
- data/spec/fixtures/cli/life_cycle_event_test.rb +8 -0
- data/spec/hivent/abstract_signal_spec.rb +161 -0
- data/spec/hivent/cli/consumer_spec.rb +68 -0
- data/spec/hivent/cli/runner_spec.rb +75 -0
- data/spec/hivent/cli/start_option_parser_spec.rb +48 -0
- data/spec/hivent/life_cycle_event_handler_spec.rb +38 -0
- data/spec/hivent/redis/consumer_spec.rb +348 -0
- data/spec/hivent/redis/signal_spec.rb +155 -0
- data/spec/hivent_spec.rb +100 -0
- data/spec/spec/matchers/emit_spec.rb +66 -0
- data/spec/spec/signal_spec.rb +72 -0
- data/spec/spec_helper.rb +27 -0
- data/spec/support/matchers/exit_with_code.rb +28 -0
- data/spec/support/stdout_helpers.rb +25 -0
- metadata +267 -0
data/lib/hivent/cli/runner.rb
@@ -0,0 +1,50 @@
+# frozen_string_literal: true
+require 'optparse'
+require "active_support"
+require "active_support/core_ext"
+
+require_relative "./start_option_parser"
+require_relative "./consumer"
+
+module Hivent
+
+  module CLI
+
+    class Runner
+
+      OPTION_PARSERS = {
+        start: StartOptionParser
+      }.freeze
+
+      def initialize(argv)
+        @argv = argv
+        @command = @argv.shift.to_s.to_sym
+      end
+
+      def run
+        if parser = OPTION_PARSERS[@command]
+          send(@command, parser.new(@command, @argv).parse)
+        else
+          puts help
+        end
+      end
+
+      private
+
+      def start(options)
+        Consumer.run!(options)
+      end
+
+      def help
+        <<-EOS.strip_heredoc
+          Available COMMANDs are:
+          start : starts one or multiple consumers
+          See 'hivent COMMAND --help' for more information on a specific command.
+        EOS
+      end
+
+    end
+
+  end
+
+end
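The `bin/hivent` executable listed above (+5 lines, not expanded in this diff) presumably just hands `ARGV` to this runner. A hypothetical sketch of that wiring:

```ruby
#!/usr/bin/env ruby
# Hypothetical wiring; the real 5-line bin/hivent is not shown in this diff.
require "hivent"
require "hivent/cli/runner"

Hivent::CLI::Runner.new(ARGV).run
```

Any unrecognized command falls through to the `help` text, since `OPTION_PARSERS` only maps `:start`.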
data/lib/hivent/cli/start_option_parser.rb
@@ -0,0 +1,53 @@
+# frozen_string_literal: true
+module Hivent
+
+  module CLI
+
+    class StartOptionParser
+
+      def initialize(command, argv)
+        @command = command
+        @argv = argv
+      end
+
+      def parse
+        return @options if @options
+        @options = {}
+
+        parser = OptionParser.new do |o|
+          o.banner = "Usage: hivent #{@command} [options]"
+
+          o.on('-r', '--require PATH', 'File to require to bootstrap consumers') do |arg|
+            @options[:require] = arg
+          end
+
+          o.on('-p', '--pid-dir DIR', 'Location of worker pid files') do |arg|
+            @options[:pid_dir] = arg
+          end
+        end
+
+        parser.parse(@argv)
+
+        validate_options
+
+        @options
+      end
+
+      def validate_options
+        if @options[:require].nil? || !File.exist?(@options[:require])
+          puts <<-EOS.strip_heredoc
+            =========================================================
+            Please point hivent to a Ruby file
+            to load your consumers with -r FILE.
+            =========================================================
+          EOS
+
+          exit(1)
+        end
+      end
+
+    end
+
+  end

+end
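`parse` memoizes its result, so repeated calls return the cached options hash without touching `@argv` again. A usage sketch (file path hypothetical; `validate_options` prints the banner and exits with status 1 unless the `-r` target exists):

```ruby
parser = Hivent::CLI::StartOptionParser.new(:start, ["-r", "./consumers.rb"])
parser.parse # => { require: "./consumers.rb" } when ./consumers.rb exists
parser.parse # second call returns the same memoized hash
```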
data/lib/hivent/config.rb
@@ -0,0 +1,22 @@
+# frozen_string_literal: true
+require "hivent/life_cycle_event_handler"
+require "hivent/config/options"
+
+module Hivent
+
+  module Config
+
+    SUPPORTED_BACKENDS = [:redis].freeze
+
+    extend self
+    extend Options
+
+    option :client_id, validate: ->(value) { value.present? }
+    option :backend, validate: ->(value) { SUPPORTED_BACKENDS.include?(value.to_sym) }
+    option :endpoint
+    option :partition_count, default: 1, validate: ->(value) { value.is_a?(Integer) && value.positive? }
+    option :life_cycle_event_handler, default: LifeCycleEventHandler.new
+
+  end
+
+end
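Because `Config` extends the `Options` DSL (next hunk), each `option` line above yields a validated accessor pair on the module itself. A minimal sketch of what that permits:

```ruby
Hivent::Config.client_id = "my_app"
Hivent::Config.backend   = :redis
Hivent::Config.partition_count = 4

Hivent::Config.partition_count = 0
# raises Hivent::Config::Options::UnsupportedOptionError,
# since the validator demands a positive Integer
```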
data/lib/hivent/config/options.rb
@@ -0,0 +1,51 @@
+# frozen_string_literal: true
+module Hivent
+
+  module Config
+
+    module Options
+
+      class UnsupportedOptionError < StandardError; end
+
+      def defaults
+        @defaults ||= {}
+      end
+
+      def validators
+        @validators ||= {}
+      end
+
+      def option(name, options = {})
+        defaults[name] = settings[name] = options[:default]
+        validators[name] = options[:validate] || ->(_value) { true }
+
+        class_eval <<-RUBY
+          def #{name}
+            settings[#{name.inspect}]
+          end
+          def #{name}=(value)
+            unless validators[#{name.inspect.to_sym}].(value)
+              raise UnsupportedOptionError.new("Unsupported value " + value.inspect + " for option #{name.inspect}")
+            end
+
+            settings[#{name.inspect}] = value
+          end
+          def #{name}?
+            #{name}
+          end
+
+          def reset_#{name}
+            settings[#{name.inspect}] = defaults[#{name.inspect}]
+          end
+        RUBY
+      end
+
+      def settings
+        @settings ||= {}
+      end
+
+    end
+
+  end
+
+end
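Interpolation runs before `class_eval` compiles the template, so for a concrete name the generated source is plain Ruby. Roughly, `option :client_id` expands to:

```ruby
def client_id
  settings[:client_id]
end

def client_id=(value)
  unless validators[:client_id].(value)
    raise UnsupportedOptionError.new("Unsupported value " + value.inspect + " for option :client_id")
  end

  settings[:client_id] = value
end

def client_id?
  client_id
end

def reset_client_id
  settings[:client_id] = defaults[:client_id]
end
```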
data/lib/hivent/emitter.rb
@@ -0,0 +1,41 @@
+# frozen_string_literal: true
+module Hivent
+
+  class Emitter
+
+    include EventEmitter
+    attr_accessor :events
+
+    WILDCARD = :all
+
+    def initialize
+      @events = []
+    end
+
+    def broadcast(payload)
+      emittable_event_names(payload.with_indifferent_access).each do |emittable_event_name|
+        emit(emittable_event_name, payload)
+      end
+    end
+
+    private
+
+    def emittable_event_names(payload)
+      [
+        event_name(payload),
+        [event_name(payload), event_version(payload)].join(":"),
+        WILDCARD
+      ]
+    end
+
+    def event_name(payload)
+      payload[:meta].try(:[], :name)
+    end
+
+    def event_version(payload)
+      payload[:meta].try(:[], :version)
+    end
+
+  end
+
+end
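`broadcast` fans one payload out under three keys: the bare event name, the `name:version` pair, and the `:all` wildcard, so subscribers can bind at whichever granularity they need. A sketch, assuming the `on`/`emit` subscription API of the event_emitter gem this class includes:

```ruby
emitter = Hivent::Emitter.new

emitter.on("user:created")   { |payload| puts "any version" }
emitter.on("user:created:1") { |payload| puts "version 1 only" }
emitter.on(:all)             { |payload| puts "every event" }

# Fires all three handlers above:
emitter.broadcast(meta: { name: "user:created", version: 1 })
```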
data/lib/hivent/life_cycle_event_handler.rb
@@ -0,0 +1,41 @@
+# frozen_string_literal: true
+module Hivent
+
+  class LifeCycleEventHandler
+
+    # Invoked when a consumer worker starts and registers events and partition count.
+    #
+    # parameters:
+    #   client_id: name of the application
+    #   events: array of hashes for the registered events ([{ name: "my:event", version: 1 }, ...])
+    #   partition_count: number of partitions registered for this application
+    def application_registered(client_id, events, partition_count)
+      # do nothing
+    end
+
+    # Invoked when an event has successfully been processed by all registered handlers
+    #
+    # parameters:
+    #   event_name: name of the processed event
+    #   event_version: version of the processed event
+    #   payload: payload of the processed event
+    def event_processing_succeeded(event_name, event_version, payload)
+      # do nothing
+    end
+
+    # Invoked when processing an event failed: either the payload could not be parsed as JSON, the payload did not
+    # contain all required information, or an application error happened while processing in one of the registered
+    # handlers.
+    #
+    # parameters:
+    #   exception: the exception that occurred
+    #   payload: the parsed payload, or nil if the event payload was invalid JSON
+    #   raw_payload: the original unparsed payload (String)
+    #   dead_letter_queue_name: name of the dead letter queue this event has been sent to
+    def event_processing_failed(exception, payload, raw_payload, dead_letter_queue_name)
+      # do nothing
+    end
+
+  end
+
+end
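Every hook on the default handler is a no-op, so an application can subclass it, override only the hooks it cares about, and register the instance through the `life_cycle_event_handler` option defined in `config.rb` above. A minimal sketch (logging destination hypothetical):

```ruby
class LoggingHandler < Hivent::LifeCycleEventHandler
  def event_processing_failed(exception, payload, raw_payload, dead_letter_queue_name)
    # Hypothetical logging; any error tracker would fit here.
    warn "event failed (#{exception.class}), pushed to #{dead_letter_queue_name}"
  end
end

Hivent::Config.life_cycle_event_handler = LoggingHandler.new
```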
data/lib/hivent/redis/consumer.rb
@@ -0,0 +1,82 @@
+# frozen_string_literal: true
+module Hivent
+
+  module Redis
+
+    class Consumer
+
+      include Hivent::Redis::Extensions
+
+      LUA_CONSUMER = File.expand_path("../lua/consumer.lua", __FILE__)
+      # In milliseconds
+      SLEEP_TIME = 200
+      CONSUMER_TTL = 1000
+
+      def initialize(redis, service_name, name, life_cycle_event_handler)
+        @redis = redis
+        @service_name = service_name
+        @name = name
+        @stop = false
+        @life_cycle_event_handler = life_cycle_event_handler
+      end
+
+      def run!
+        consume while !@stop
+      end
+
+      def stop!
+        @stop = true
+      end
+
+      def queues
+        script(LUA_CONSUMER, @service_name, @name, CONSUMER_TTL)
+      end
+
+      def consume
+        to_process = items
+
+        to_process.each do |(queue, item)|
+          payload = nil
+          begin
+            payload = JSON.parse(item).with_indifferent_access
+
+            Hivent.emitter.broadcast(payload)
+
+            @life_cycle_event_handler.event_processing_succeeded(event_name(payload), event_version(payload), payload)
+          rescue => e
+            @redis.lpush(dead_letter_queue_name(queue), item)
+
+            @life_cycle_event_handler.event_processing_failed(e, payload, item, dead_letter_queue_name(queue))
+          end
+
+          @redis.rpop(queue)
+        end
+
+        Kernel.sleep(SLEEP_TIME.to_f / 1000) if to_process.empty?
+      end
+
+      private
+
+      def items
+        queues
+          .map { |queue| [queue, @redis.lindex(queue, -1)] }
+          .select { |(_queue, item)| item }
+      end
+
+      def event_name(payload)
+        payload["meta"].try(:[], "name")
+      end
+
+      def event_version(payload)
+        payload["meta"].try(:[], "version")
+      end
+
+      def dead_letter_queue_name(queue)
+        "#{queue}:dead_letter"
+      end
+
+    end
+
+  end
+
+end
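Note the order inside `consume`: an item is peeked with `LINDEX queue -1` and only `RPOP`ped after the handlers (or the dead-letter `LPUSH`) have run, so a worker that dies mid-event re-delivers the item on restart, giving at-least-once semantics per partition queue. Wiring a worker up by hand might look like this (connection details assumed):

```ruby
require "redis"

redis    = Redis.new(url: "redis://localhost:6379")
handler  = Hivent::LifeCycleEventHandler.new
consumer = Hivent::Redis::Consumer.new(redis, "my_service", "worker-1", handler)

trap("TERM") { consumer.stop! } # let the current iteration finish, then exit run!
consumer.run!
```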
data/lib/hivent/redis/extensions.rb
@@ -0,0 +1,26 @@
+# frozen_string_literal: true
+module Hivent
+
+  module Redis
+
+    module Extensions
+
+      LUA_CACHE = Hash.new { |h, k| h[k] = Hash.new }
+
+      def script(file, *args)
+        cache = LUA_CACHE[@redis.client.options[:url]]
+
+        sha = if cache.key?(file)
+          cache[file]
+        else
+          cache[file] = @redis.script("LOAD", File.read(file))
+        end
+
+        @redis.evalsha(sha, [], args)
+      end
+
+    end
+
+  end
+
+end
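`script` caches the SHA1 digest that `SCRIPT LOAD` returns, keyed by Redis URL, so every subsequent poll sends a small `EVALSHA` instead of the full Lua source. For illustration, the same two steps issued directly through redis-rb:

```ruby
require "redis"

redis = Redis.new
sha = redis.script("LOAD", "return ARGV[1]") # upload once, get the digest back
redis.evalsha(sha, [], ["hello"])            # => "hello", executed by digest only
```

One caveat: Redis drops its script cache on restart, after which `EVALSHA` raises a NOSCRIPT error until the script is loaded again; the in-process cache above does not handle that case.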
data/lib/hivent/redis/lua/consumer.lua
@@ -0,0 +1,179 @@
+local service_name = ARGV[1]
+local consumer_name = ARGV[2]
+local CONSUMER_TTL = ARGV[3]
+
+-- Performs deep equality between two tables
+local function table_eq(table1, table2)
+  local avoid_loops = {}
+  local function recurse(t1, t2)
+    -- compare value types
+    if type(t1) ~= type(t2) then return false end
+    -- Base case: compare simple values
+    if type(t1) ~= "table" then return t1 == t2 end
+    -- Now, on to tables.
+    -- First, let's avoid looping forever.
+    if avoid_loops[t1] then return avoid_loops[t1] == t2 end
+    avoid_loops[t1] = t2
+    -- Copy keys from t2
+    local t2keys = {}
+    local t2tablekeys = {}
+    for k, _ in pairs(t2) do
+      if type(k) == "table" then table.insert(t2tablekeys, k) end
+      t2keys[k] = true
+    end
+    -- Let's iterate keys from t1
+    for k1, v1 in pairs(t1) do
+      local v2 = t2[k1]
+      if type(k1) == "table" then
+        -- if key is a table, we need to find an equivalent one.
+        local ok = false
+        for i, tk in ipairs(t2tablekeys) do
+          if table_eq(k1, tk) and recurse(v1, t2[tk]) then
+            table.remove(t2tablekeys, i)
+            t2keys[tk] = nil
+            ok = true
+            break
+          end
+        end
+        if not ok then return false end
+      else
+        -- t1 has a key which t2 doesn't have, fail.
+        if v2 == nil then return false end
+        t2keys[k1] = nil
+        if not recurse(v1, v2) then return false end
+      end
+    end
+    -- if t2 has a key which t1 doesn't have, fail.
+    if next(t2keys) then return false end
+    return true
+  end
+  return recurse(table1, table2)
+end
+
+local function keepalive(service, consumer)
+  redis.call("SET", service .. ":" .. consumer .. ":alive", "true", "PX", CONSUMER_TTL)
+  redis.call("SADD", service .. ":consumers", consumer)
+end
+
+local function cleanup(service)
+  local consumer_index_key = service .. ":consumers"
+  local consumers = redis.call("SMEMBERS", consumer_index_key)
+
+  for _, consumer in ipairs(consumers) do
+    local consumer_status_key = service .. ":" .. consumer .. ":alive"
+    local alive = redis.call("GET", consumer_status_key)
+
+    if not alive then
+      redis.call("SREM", consumer_index_key, consumer)
+    end
+  end
+end
+
+local function distribute(consumers, partition_count)
+  local distribution = {}
+  local consumer_count = table.getn(consumers)
+  local remainder = partition_count % consumer_count
+
+  for i=1,consumer_count do
+    distribution[i] = math.floor(partition_count/consumer_count)
+  end
+
+  for i=1,remainder do
+    distribution[i] = distribution[i] + 1
+  end
+
+  return distribution
+end
+
+local function getdesiredstate(service_name, consumers, partition_count)
+  local state = {}
+  local distribution = distribute(consumers, partition_count)
+  local consumer_count = table.getn(consumers)
+  local assigned_partition_count = 0
+
+  for i=1,consumer_count do
+    state[consumers[i]] = {}
+
+    for j=1,distribution[i] do
+      table.insert(state[consumers[i]], 1, service_name .. ":" .. j + assigned_partition_count - 1)
+    end
+
+    assigned_partition_count = assigned_partition_count + distribution[i]
+  end
+
+  return state
+end
+
+local function getcurrentstate(service_name, consumers)
+  local state = {}
+
+  for _, consumer in ipairs(consumers) do
+    local assigned_key = service_name .. ":" .. consumer .. ":assigned"
+    state[consumer] = redis.call("LRANGE", assigned_key, 0, -1)
+  end
+
+  return state
+end
+
+local function states_match(state1, state2)
+  return table_eq(state1, state2)
+end
+
+local function all_free(workers)
+  local total_count = 0
+
+  for _, partitions in pairs(workers) do
+    total_count = total_count + table.getn(partitions)
+  end
+
+  return total_count == 0
+end
+
+local function save_state(service_name, state)
+  for worker, partitions in pairs(state) do
+    for _, partition in ipairs(partitions) do
+      redis.call("RPUSH", service_name .. ":" .. worker .. ":assigned", partition)
+      redis.call("EXPIRE", service_name .. ":" .. worker .. ":assigned", CONSUMER_TTL)
+    end
+  end
+end
+
+local function rebalance(service_name, consumer_name)
+  local consumers = redis.call("SMEMBERS", service_name .. ":consumers")
+  table.sort(consumers)
+  local partition_count = tonumber(redis.call("GET", service_name .. ":partition_count"))
+
+  local desired_state = getdesiredstate(service_name, consumers, partition_count)
+
+  local current_state = getcurrentstate(service_name, consumers)
+
+  local is_stable_state = states_match(desired_state, current_state)
+
+  if not is_stable_state then
+    if all_free(current_state) then
+      save_state(service_name, desired_state)
+
+      return desired_state[consumer_name]
+    else
+      redis.call("DEL", service_name .. ":" .. consumer_name .. ":assigned")
+      return {}
+    end
+  else
+    return desired_state[consumer_name]
+  end
+end
+
+local function heartbeat(service_name, consumer_name)
+  -- keep consumer alive
+  keepalive(service_name, consumer_name)
+
+  -- clean up dead consumers
+  cleanup(service_name)
+
+  -- rebalance
+  local new_config = rebalance(service_name, consumer_name)
+
+  return new_config
+end
+
+return heartbeat(service_name, consumer_name)
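`distribute` hands each consumer the floor of `partition_count / consumer_count` and spreads the remainder across the first consumers in sorted order, so assignments differ by at most one partition. A worked illustration of the same arithmetic in Ruby (names hypothetical):

```ruby
# Mirrors the Lua distribute() above: 5 partitions over 2 consumers -> [3, 2]
def distribute(consumer_count, partition_count)
  base, remainder = partition_count.divmod(consumer_count)
  Array.new(consumer_count) { |i| i < remainder ? base + 1 : base }
end

distribute(2, 5) # => [3, 2]; getdesiredstate then maps these counts to the
                 # partition queues "service:0".."service:2" and "service:3".."service:4"
```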