receptor_controller-client 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +16 -0
- data/Gemfile +10 -0
- data/LICENSE.txt +202 -0
- data/README.md +8 -0
- data/Rakefile +7 -0
- data/lib/receptor_controller-client.rb +4 -0
- data/lib/receptor_controller/client.rb +94 -0
- data/lib/receptor_controller/client/configuration.rb +85 -0
- data/lib/receptor_controller/client/directive.rb +33 -0
- data/lib/receptor_controller/client/directive_blocking.rb +87 -0
- data/lib/receptor_controller/client/directive_non_blocking.rb +144 -0
- data/lib/receptor_controller/client/exception.rb +10 -0
- data/lib/receptor_controller/client/response_worker.rb +214 -0
- data/lib/receptor_controller/client/version.rb +5 -0
- data/receptor_controller-client.gemspec +41 -0
- data/spec/receptor_controller/client_spec.rb +87 -0
- data/spec/receptor_controller/directive_blocking_spec.rb +197 -0
- data/spec/receptor_controller/directive_non_blocking_spec.rb +160 -0
- data/spec/receptor_controller/response_worker_spec.rb +169 -0
- data/spec/spec_helper.rb +19 -0
- metadata +268 -0
@@ -0,0 +1,87 @@
|
|
1
|
+
require "receptor_controller/client/directive"

module ReceptorController
  # Blocking directive for requests through POST /job
  # Successful POST causes locking current thread until response from Kafka comes
  #
  # Raises kind of ReceptorController::Client::Error in case of problems/timeout
  class Client::DirectiveBlocking < Client::Directive
    def initialize(name:, account:, node_id:, payload:, client:, log_message_common: nil)
      super
      self.response_lock = Mutex.new
      self.response_waiting = ConditionVariable.new
      self.response_data = nil
      self.response_exception = nil
      # Set (under response_lock) when a final, signal-worthy event happened
      # (EOF, unknown type, error or timeout). Guards the condvar wait below
      # against the lost-wakeup race where the Kafka response is processed
      # before the caller reaches wait_for_response, and against spurious
      # wakeups. Without it the caller can block forever, because EOF also
      # unregisters the message so no timeout callback would ever fire.
      self.response_done = false
    end

    # Sends the directive via POST /job and blocks until the Kafka response
    # (or error/timeout) arrives.
    #
    # @param body [Hash] request payload (defaults to default_body from Client::Directive)
    # @return [Object] duplicate of the received response payload
    # @raise [ReceptorController::Client::ControllerResponseError] on HTTP/transport failure
    # @raise [ReceptorController::Client::Error] subclasses via wait_for_response on error/timeout
    def call(body = default_body)
      @url = JSON.parse(body[:payload])['url']
      response = connection.post(config.job_path, body.to_json)

      msg_id = JSON.parse(response.body)['id']

      logger.debug("Receptor response: registering message #{msg_id}".tap { |msg| msg << " req: #{body["href_slug"]}" if ENV["LOG_ALL_RECEPTOR_MESSAGES"]&.to_i != 0 })
      # registers message id for kafka responses
      response_worker.register_message(msg_id, self)
      wait_for_response(msg_id)
    rescue Faraday::Error => e
      msg = receptor_log_msg("Directive #{name} failed (#{log_message_common}) [MSG: #{msg_id}]", account, node_id, e)
      raise ReceptorController::Client::ControllerResponseError.new(msg)
    end

    # Blocks the current thread until one of the response callbacks signals
    # completion. The predicate loop makes a signal sent before this method
    # started waiting (or a spurious wakeup) harmless.
    def wait_for_response(_msg_id)
      response_lock.synchronize do
        response_waiting.wait(response_lock) until response_done

        raise response_exception if response_failed?

        response_data.dup
      end
    end

    # TODO: Review when future plugins with more "response" messages come
    # Called by ResponseWorker for successful Kafka messages.
    def response_success(msg_id, message_type, response)
      response_lock.synchronize do
        if message_type == MESSAGE_TYPE_RESPONSE
          # Data message: store payload; EOF is still expected, keep waiting
          self.response_data = response
        elsif message_type == MESSAGE_TYPE_EOF
          self.response_done = true
          response_waiting.signal
        else
          self.response_exception = ReceptorController::Client::UnknownResponseTypeError.new("#{log_message_common}[MSG: #{msg_id}]")
          self.response_done = true
          response_waiting.signal
        end
      end
    end

    # Called by ResponseWorker on an error response; wakes the waiting thread,
    # which then raises the stored exception.
    def response_error(msg_id, response_code, err_message)
      response_lock.synchronize do
        self.response_data = nil
        self.response_exception = ReceptorController::Client::ResponseError.new("#{err_message} (code: #{response_code}) (#{log_message_common}) [MSG: #{msg_id}]")
        self.response_done = true
        response_waiting.signal
      end
    end

    # Called by ResponseWorker when no response arrived within the configured
    # timeout; wakes the waiting thread, which raises ResponseTimeoutError.
    def response_timeout(msg_id)
      response_lock.synchronize do
        self.response_data = nil
        self.response_exception = ReceptorController::Client::ResponseTimeoutError.new("Timeout (#{log_message_common}) [MSG: #{msg_id}]")
        self.response_done = true
        response_waiting.signal
      end
    end

    private

    attr_accessor :response_data, :response_done, :response_exception, :response_lock, :response_waiting

    # Lazily built Faraday connection; RaiseError middleware turns HTTP error
    # statuses into Faraday::Error, rescued in #call.
    def connection
      @connection ||= Faraday.new(config.controller_url, :headers => client.headers) do |c|
        c.use(Faraday::Response::RaiseError)
        c.adapter(Faraday.default_adapter)
      end
    end

    # NOTE(review): `present?` comes from ActiveSupport — presumably loaded by
    # the client's dependency chain; confirm, plain `!response_exception.nil?`
    # would be dependency-free.
    def response_failed?
      response_exception.present?
    end
  end
end
|
@@ -0,0 +1,144 @@
|
|
1
|
+
require "concurrent"
require "receptor_controller/client/directive"

module ReceptorController
  # Non-blocking directive for requests through POST /job
  # Directive's call returns either message ID or nil
  #
  # Callback blocks can be specified for handling responses;
  # each on_* registrar may be called repeatedly to stack callbacks
  # (the example registers two on_eof blocks — both fire).
  # @example:
  #   receiver = <object with methods below>
  #   directive
  #     .on_success do |msg_id, response|
  #       receiver.process_response(msg_id, response)
  #     end
  #     .on_error do |msg_id, code, response|
  #       receiver.process_error(msg_id, code, response)
  #     end
  #     .on_timeout do |msg_id|
  #       receiver.process_timeout(msg_id)
  #     end
  #     .on_eof do |msg_id|
  #       receiver.process_eof(msg_id)
  #     end
  #     .on_eof do |msg_id|
  #       logger.debug("[#{msg_id}] EOF message received")
  #     end
  #
  #   directive.call
  class Client::DirectiveNonBlocking < Client::Directive
    def initialize(name:, account:, node_id:, payload:, client:, log_message_common: nil)
      super

      # Callback registries, filled by the on_* methods below
      @success_callbacks = []
      @eof_callbacks = []
      @timeout_callbacks = []
      @error_callbacks = []

      # Count of response threads still running; EOF processing waits for zero
      @responses_count = Concurrent::AtomicFixnum.new
      @eof_lock = Mutex.new
      @eof_wait = ConditionVariable.new
    end

    # Entrypoint for request
    # POSTs the job and registers the returned message ID for Kafka responses.
    #
    # @param body [Hash] request payload (defaults to default_body from Client::Directive)
    # @return [String, nil] message ID on success, nil on any failure (errors are logged, never raised)
    def call(body = default_body)
      response = Faraday.post(config.job_url, body.to_json, client.headers)
      if response.success?
        msg_id = JSON.parse(response.body)['id']

        # registers message id for kafka responses
        response_worker.register_message(msg_id, self)

        msg_id
      else
        logger.error(receptor_log_msg("Directive #{name} failed (#{log_message_common}): HTTP #{response.status}", account, node_id))
        nil
      end
    rescue Faraday::Error => e
      logger.error(receptor_log_msg("Directive #{name} failed (#{log_message_common}). POST /job error", account, node_id, e))
      nil
    rescue => e
      logger.error(receptor_log_msg("Directive #{name} failed (#{log_message_common})", account, node_id, e))
      nil
    end

    # Registers a callback for 'response'-type messages. Returns self for chaining.
    def on_success(&block)
      @success_callbacks << block if block_given?
      self
    end

    # Registers a callback for the final 'eof' message. Returns self for chaining.
    def on_eof(&block)
      @eof_callbacks << block if block_given?
      self
    end

    # Registers a callback fired when no response arrives in time. Returns self for chaining.
    def on_timeout(&block)
      @timeout_callbacks << block if block_given?
      self
    end

    # Registers a callback for error responses. Returns self for chaining.
    def on_error(&block)
      @error_callbacks << block if block_given?
      self
    end

    # Handles successful responses in Threads
    # EOF processing waits until all response threads are finished
    def response_success(msg_id, message_type, response)
      if message_type == MESSAGE_TYPE_EOF
        eof_thread do
          @eof_callbacks.each { |block| block.call(msg_id) }
        end
      else
        response_thread do
          @success_callbacks.each { |block| block.call(msg_id, response) }
        end
      end
    end

    # Handles error responses in Threads
    # EOF processing waits until all threads are finished
    def response_error(msg_id, response_code, response)
      response_thread do
        @error_callbacks.each { |block| block.call(msg_id, response_code, response) }
      end
    end

    # Error state: Any response wasn't received in `Configuration.response_timeout`
    def response_timeout(msg_id)
      response_thread do
        @timeout_callbacks.each { |block| block.call(msg_id) }
      end
    end

    private

    # Responses are processed in threads to be able to call subrequests
    # EOF response is blocked by thread-safe counter
    # NOTE: the counter is incremented in the caller's (Kafka listener's)
    # thread, *before* the worker thread starts — eof_thread relies on this
    # ordering to see an up-to-date count.
    def response_thread
      @responses_count.increment

      Thread.new do
        yield
      ensure
        # Decrement happens even if the callback raises; the last finishing
        # thread signals a possibly-waiting eof_thread (signal is sent under
        # @eof_lock, matching the wait below).
        @responses_count.decrement
        @eof_lock.synchronize do
          @eof_wait.signal if @responses_count.value == 0
        end
      end
    end

    # Messages in kafka are received serialized, EOF is always last
    # => @responses_count has to be always positive
    # until all responses are processed
    def eof_thread
      Thread.new do
        @eof_lock.synchronize do
          @eof_wait.wait(@eof_lock) if @responses_count.value > 0
        end

        yield
      end
    end
  end
end
|
@@ -0,0 +1,214 @@
|
|
1
|
+
require 'base64'
require "concurrent"
require 'stringio'
require 'zlib'

module ReceptorController
  # ResponseWorker is listening on Kafka topic platform.receptor-controller.responses (@see Configuration.queue_topic)
  # It asynchronously receives responses requested by POST /job to receptor controller.
  # Request and response is paired by message ID (response of POST /job and 'in_response_to' value in kafka response here)
  #
  # Successful responses are at least two:
  # * 1+ of 'response' type, containing data
  # * 1 of 'eof' type, signalizing end of transmission
  #
  # Registered messages without response are removed after timeout (Configuration.response_timeout)
  #
  # All type of responses/timeout can be sent to registered callbacks (@see :register_message)
  #
  # Use "start" and "stop" methods to start/stop listening on Kafka
  class Client::ResponseWorker
    attr_reader :started
    alias started? started

    # NOTE(review): assigned in initialize but not written by any method in
    # this class — presumably populated externally (e.g. tests); verify callers.
    attr_accessor :received_messages

    def initialize(config, logger)
      self.config = config
      self.lock = Mutex.new          # guards start/stop lifecycle
      self.timeout_lock = Mutex.new  # guards :last_checked_at bookkeeping
      self.logger = logger
      self.registered_messages = Concurrent::Map.new
      self.received_messages = Concurrent::Array.new
      self.started = Concurrent::AtomicBoolean.new(false)
      self.workers = {}
    end

    # Start listening on Kafka
    # Idempotent: returns immediately if already started. Spawns two threads:
    # :maintenance (timeout sweeper) and :listener (Kafka consumer).
    def start
      lock.synchronize do
        return if started.value

        started.value = true
        workers[:maintenance] = Thread.new { check_timeouts while started.value }
        workers[:listener] = Thread.new { listen while started.value }
      end
    end

    # Stop listener
    # Idempotent. The listener is terminated (its ensure closes the Kafka
    # client); the maintenance thread is joined so it exits its loop cleanly.
    def stop
      lock.synchronize do
        return unless started.value

        started.value = false
        workers[:listener]&.terminate
        workers[:maintenance]&.join
      end
    end

    # Registers message_id received by request,
    # Defines response and timeout callback methods
    #
    # @param msg_id [String] UUID
    # @param receiver [Object] any object implementing callbacks
    # @param response_callback [Symbol] name of receiver's method processing responses
    # @param timeout_callback [Symbol] name of receiver's method processing timeout [optional]
    # @param error_callback [Symbol] name of receiver's method processing errors [optional]
    def register_message(msg_id, receiver, response_callback: :response_success, timeout_callback: :response_timeout, error_callback: :response_error)
      registered_messages[msg_id] = {:receiver => receiver,
                                     :response_callback => response_callback,
                                     :timeout_callback => timeout_callback,
                                     :error_callback => error_callback,
                                     :last_checked_at => Time.now.utc}
    end

    private

    attr_accessor :config, :lock, :logger, :registered_messages, :timeout_lock, :workers
    attr_writer :started

    # One consume cycle: opens a Kafka connection, processes messages until an
    # exception occurs, then closes the client. Restarted by the :listener
    # thread loop while started is true.
    def listen
      # Open a connection to the messaging service
      client = ManageIQ::Messaging::Client.open(default_messaging_opts)

      logger.info("Receptor Response worker started...")
      client.subscribe_topic(queue_opts) do |message|
        process_message(message)
      end
    rescue => err
      logger.error("Exception in kafka listener: #{err}\n#{err.backtrace.join("\n")}")
    ensure
      client&.close
    end

    # Dispatches a single Kafka message to the callbacks registered for its
    # 'in_response_to' ID. Unregistered IDs are ignored (another pod may own
    # them). Parse/processing errors are logged, never raised.
    def process_message(message)
      response = JSON.parse(message.payload)

      if (message_id = response['in_response_to'])
        logger.debug("Receptor response: Received message_id: #{message_id}")
        if (callbacks = registered_messages[message_id]).present?
          # Reset last_checked_at to avoid timeout in multi-response messages
          reset_last_checked_at(callbacks)

          if response['code'] == 0
            #
            # Response OK
            #
            message_type = response['message_type'] # "response" (with data) or "eof" (without data)
            # 'eof' ends the transmission -> unregister before invoking callback
            registered_messages.delete(message_id) if message_type == 'eof'

            payload = response['payload']
            payload = unpack_payload(payload) if message_type == 'response' && payload.kind_of?(String)

            logger.debug("Receptor response: OK | message #{message_id} (#{payload})")

            callbacks[:receiver].send(callbacks[:response_callback], message_id, message_type, payload)
          else
            #
            # Response Error
            #
            registered_messages.delete(message_id)

            logger.error("Receptor response: ERROR | message #{message_id} (#{response})")

            callbacks[:receiver].send(callbacks[:error_callback], message_id, response['code'], response['payload'])
          end
        elsif ENV["LOG_ALL_RECEPTOR_MESSAGES"]&.to_i != 0
          # noop, it's not error if not registered, can be processed by another pod
          logger.debug("Receptor response unhandled: #{message_id} (#{response['code']})")
        end
      else
        logger.error("Receptor response: Message id (in_response_to) not received! #{response}")
      end
    rescue JSON::ParserError => e
      logger.error("Receptor response: Failed to parse Kafka response (#{e.message})\n#{message.payload}")
    rescue => e
      logger.error("Receptor response: #{e}\n#{e.backtrace.join("\n")}")
    ensure
      message.ack unless config.queue_auto_ack
    end

    # Maintenance pass: unregisters messages idle longer than `threshold` and
    # fires their timeout callbacks, then sleeps for the poll interval.
    def check_timeouts(threshold = config.response_timeout)
      expired = []
      #
      # STEP 1 Collect expired messages
      #
      registered_messages.each_pair do |message_id, callbacks|
        timeout_lock.synchronize do
          if callbacks[:last_checked_at] < Time.now.utc - threshold
            expired << message_id
          end
        end
      end

      #
      # STEP 2 Remove expired messages, send timeout callbacks
      #
      expired.each do |message_id|
        callbacks = registered_messages.delete(message_id)
        if callbacks[:receiver].respond_to?(callbacks[:timeout_callback])
          callbacks[:receiver].send(callbacks[:timeout_callback], message_id)
        end
      end

      sleep(config.response_timeout_poll_time)
    rescue => err
      logger.error("Exception in maintenance worker: #{err}\n#{err.backtrace.join("\n")}")
    end

    # GZIP recognition
    # https://tools.ietf.org/html/rfc1952#page-5
    # Checks the two-byte gzip magic number (0x1f 0x8b).
    def gzipped?(data)
      sign = data.to_s.bytes[0..1]

      sign[0] == '0x1f'.hex && sign[1] == '0x8b'.hex
    end

    # Tries to decompress String response
    # If not a gzip, it's a String error from receptor node
    # NOTE: on the non-gzip path the original (still Base64-encoded) input is
    # returned, not the decoded form.
    def unpack_payload(data)
      decoded = Base64.decode64(data)
      if gzipped?(decoded)
        gz = Zlib::GzipReader.new(StringIO.new(decoded))
        JSON.parse(gz.read)
      else
        data
      end
    end

    # Reset last_checked_at to avoid timeout in multi-response messages
    def reset_last_checked_at(callbacks)
      timeout_lock.synchronize do
        callbacks[:last_checked_at] = Time.now.utc
      end
    end

    # No persist_ref here, because all instances (pods) needs to receive kafka message
    def queue_opts
      opts = {:service => config.queue_topic,
              :auto_ack => config.queue_auto_ack}
      opts[:max_bytes] = config.queue_max_bytes if config.queue_max_bytes
      opts[:persist_ref] = config.queue_persist_ref if config.queue_persist_ref
      opts
    end

    # Connection options for ManageIQ::Messaging::Client.open
    def default_messaging_opts
      {
        :host       => config.queue_host,
        :port       => config.queue_port,
        :protocol   => :Kafka,
        :client_ref => "receptor_client-responses-#{Time.now.to_i}", # A reference string to identify the client
      }
    end
  end
end
|