manageiq-messaging 0.1.7 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: af8f638cb1bd874e3e209f00fe649e680db356572ebf5d1195cc2a3526ee912a
- data.tar.gz: 162a9f8ad7a07b7bd0748b9d943e1707d8f68829ce486863b933f033e8f165e0
+ metadata.gz: aa6536d137d3873d7b73cecfc02add52d949bf4c240846b9df4c0a46b02be560
+ data.tar.gz: 240ffe6f8dacc50a80759335f4d748dc826675cf38e0567117809582580489a0
  SHA512:
- metadata.gz: a4228dbc8bd9735de39f6df90ce7f968cd17458f9e4df7c736f7baaa6886d4acbc288dda04370b452efb09169d1e5fe9428581b92e915248875699c89c3a2ac7
- data.tar.gz: 1864ed66a0bef7de3790b53abfe826f6c29e020a28507148b37dee2e3a56aec0c04c931c0f02b7968d22ff96e211f55bf1ef0399f0b3c2af10edc9068d4a167c
+ metadata.gz: 45b6e6592e772ffc7373489116a72403d5b6fb2a2e322b4d5d6c629ee6eddd575c3eaf1e7d92afe504b361eaaaa46047e0d85b8ed640272d41098b5d185d8a45
+ data.tar.gz: e95db7f79892be46eb800e4f241b932282cdebf2f4ffb30a65bfea8a85f24161b9fdc5afbca4d182170aa7fb33f85edcf59881b51a8be80db868bf65bbf6da5b
data/.travis.yml CHANGED
@@ -1,8 +1,9 @@
  sudo: false
  language: ruby
  rvm:
- - 2.3.1
  - 2.4.5
+ - 2.5.7
+ - 2.6.5
  before_install: gem install bundler -v 1.13.0
  after_script: bundle exec codeclimate-test-reporter
  sudo: false
data/CHANGES CHANGED
@@ -18,5 +18,5 @@
  = 0.1.6 - 6-July-2020
  * Rescue message body decoding errors. Re-raise errors raised by users code of processing received messages.

- = 0.1.7 - 14-May-2021
- * Loosen activesupport dependency to ~> 5.2
+ = 1.0.0 - 28-Sep-2020
+ * Switch to use rdkafka client
@@ -40,6 +40,7 @@ module ManageIQ
  def self.open(options)
  protocol = options[:protocol] || :Stomp
  client = Object.const_get("ManageIQ::Messaging::#{protocol}::Client").new(options)
+
  return client unless block_given?

  begin
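For orientation, here is a minimal usage sketch of the entry point touched above, assuming a local Kafka broker; the broker address, topic name, and payload are illustrative, not part of this diff:

    require "manageiq-messaging"

    # Block form: Client.open yields the client and closes it when the block returns.
    ManageIQ::Messaging::Client.open(
      :protocol => :Kafka,
      :host     => "localhost", # illustrative broker
      :port     => 9092,
      :encoding => "json"
    ) do |client|
      client.publish_topic(:service => "events", :event => "hello", :payload => {"msg" => "hi"})
    end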
@@ -18,7 +18,7 @@ module ManageIQ

  def decode_body(headers, raw_body)
  return raw_body unless headers.kind_of?(Hash)
- case headers["encoding"]
+ case headers["encoding"] || headers[:encoding]
  when "json"
  JSON.parse(raw_body)
  when "yaml"
@@ -10,16 +10,11 @@ module ManageIQ
  # * :hosts (Array of Kafka cluster hosts, or)
  # * :host (Single host name)
  # * :port (host port number)
- # * :ssl_ca_cert (security options)
- # * :ssl_client_cert
- # * :ssl_client_cert_key
- # * :sasl_gssapi_principal
- # * :sasl_gssapi_keytab
- # * :sasl_plain_username
- # * :sasl_plain_password
- # * :sasl_scram_username
- # * :sasl_scram_password
- # * :sasl_scram_mechanism
+ #
+ # For additional security options, please refer to
+ # https://github.com/edenhill/librdkafka/wiki/Using-SSL-with-librdkafka and
+ # https://github.com/edenhill/librdkafka/wiki/Using-SASL-with-librdkafka
+ #
  #
  # Kafka specific +publish_message+ options:
  # * :group_name (Used as Kafka partition_key)
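The per-option SSL/SASL keywords are dropped because librdkafka takes these as native configuration properties. A hedged sketch of what such a setup looks like when building an Rdkafka::Config directly; property names follow librdkafka's CONFIGURATION reference, while the host, paths, and credentials are illustrative:

    require "rdkafka"

    config = Rdkafka::Config.new(
      :"bootstrap.servers" => "kafka.example.com:9093",
      :"security.protocol" => "sasl_ssl",
      :"ssl.ca.location"   => "/etc/pki/tls/certs/ca.crt",
      :"sasl.mechanisms"   => "SCRAM-SHA-512",
      :"sasl.username"     => "user",
      :"sasl.password"     => "secret"
    )
    producer = config.producer # producers/consumers are derived from the config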
@@ -42,7 +37,7 @@ module ManageIQ
  #
  # +subscribe_background_job+ is currently not implemented.
  class Client < ManageIQ::Messaging::Client
- require 'kafka'
+ require 'rdkafka'
  require 'manageiq/messaging/kafka/common'
  require 'manageiq/messaging/kafka/queue'
  require 'manageiq/messaging/kafka/background_job'
@@ -53,27 +48,27 @@ module ManageIQ
  include BackgroundJob
  include Topic

- private *delegate(:subscribe, :unsubscribe, :publish, :to => :kafka_client)
- delegate :close, :to => :kafka_client
-
  attr_accessor :encoding

  def ack(ack_ref)
- @queue_consumer.try(:mark_message_as_processed, ack_ref)
- @topic_consumer.try(:mark_message_as_processed, ack_ref)
+ ack_ref.commit
+ rescue Rdkafka::RdkafkaError => e
+ logger.warn("ack failed with error #{e.message}")
+ raise unless e.message =~ /no_offset/
  end

  def close
- @topic_consumer.try(:stop)
- @topic_consumer = nil
- @queue_consumer.try(:stop)
- @queue_consumer = nil
-
- @producer.try(:shutdown)
+ @producer&.close
  @producer = nil

- kafka_client.close
- @kafka_client = nil
+ @consumer&.close
+ @consumer = nil
+ end
+
+ # list all topics
+ def topics
+ native_kafka = producer.instance_variable_get(:@native_kafka)
+ Rdkafka::Metadata.new(native_kafka).topics.collect { |topic| topic[:topic_name] }
  end

  private
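Note the new ack semantics: the ack_ref handed to a ReceivedMessage is now the rdkafka consumer itself, so Client#ack commits that consumer's current offsets (a /no_offset/ error just means there is nothing new to commit). The topics helper, by contrast, reaches into rdkafka's @native_kafka internals, for which this rdkafka version has no public equivalent. A sketch of the equivalent raw rdkafka calls, assuming a consumer built from the config shown in the initialize hunk below (topic name and handler are illustrative):

    consumer.subscribe("my.queue")
    consumer.each do |message|
      begin
        process(message) # hypothetical handler
        consumer.commit  # what Client#ack does via ack_ref.commit
      rescue Rdkafka::RdkafkaError => e
        raise unless e.message =~ /no_offset/
      end
    end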
@@ -87,12 +82,11 @@ module ManageIQ
  @encoding = options[:encoding] || 'yaml'
  require "json" if @encoding == "json"

- connection_opts = {}
- connection_opts[:client_id] = options[:client_ref] if options[:client_ref]
-
- connection_opts.merge!(options.slice(:ssl_ca_cert, :ssl_client_cert, :ssl_client_cert_key, :sasl_gssapi_principal, :sasl_gssapi_keytab, :sasl_plain_username, :sasl_plain_password, :sasl_scram_username, :sasl_scram_password, :sasl_scram_mechanism))
+ connection_opts = {:"bootstrap.servers" => hosts.join(',')}
+ connection_opts[:"client.id"] = options[:client_ref] if options[:client_ref]

- @kafka_client = ::Kafka.new(hosts, connection_opts)
+ ::Rdkafka::Config.logger = logger
+ @kafka_client = ::Rdkafka::Config.new(connection_opts)
  end
  end
  end
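Where the old code handed ruby-kafka a host list plus an options hash, the new code funnels everything into a single Rdkafka::Config keyed by librdkafka property names, from which producers and consumers are later derived. A minimal sketch of that flow (broker list and names illustrative):

    require "rdkafka"

    connection_opts = {:"bootstrap.servers" => "kafka1:9092,kafka2:9092"}
    connection_opts[:"client.id"] = "my-client" # set only when :client_ref is passed

    config   = Rdkafka::Config.new(connection_opts)
    producer = config.producer
    config[:"group.id"] = "my-group" # consumer-side settings are added later (see consumer below)
    consumer = config.consumer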
@@ -5,48 +5,28 @@ module ManageIQ
  require 'manageiq/messaging/common'
  include ManageIQ::Messaging::Common

- GROUP_FOR_QUEUE_MESSAGES = 'manageiq_messaging_queue_group_'.freeze
-
  private

  def producer
  @producer ||= kafka_client.producer
  end

- def topic_consumer(persist_ref, session_timeout = nil)
- # persist_ref enables consumer to receive messages sent when consumer is temporarily offline
- # it also enables consumers to do load balancing when multiple consumers join the with the same ref.
- @topic_consumer.try(:stop) unless @persist_ref == persist_ref
- @persist_ref = persist_ref
-
- consumer_opts = {:group_id => persist_ref}
- consumer_opts[:session_timeout] = session_timeout if session_timeout.present?
-
- @topic_consumer ||= kafka_client.consumer(consumer_opts)
- end
-
- def queue_consumer(topic, session_timeout = nil)
- # all queue consumers join the same group so that each message can be processed by one and only one consumer
- @queue_consumer.try(:stop) unless @queue_topic == topic
- @queue_topic = topic
-
- consumer_opts = {:group_id => GROUP_FOR_QUEUE_MESSAGES + topic}
- consumer_opts[:session_timeout] = session_timeout if session_timeout.present?
-
- @queue_consumer ||= kafka_client.consumer(consumer_opts)
+ def consumer(beginning, options)
+ @consumer&.close
+ kafka_client[:"group.id"] = options[:persist_ref]
+ kafka_client[:"auto.offset.reset"] = beginning ? 'smallest' : 'largest'
+ kafka_client[:"enable.auto.commit"] = !!auto_ack?(options)
+ kafka_client[:"session.timeout.ms"] = options[:session_timeout] * 1000 if options[:session_timeout].present?
+ kafka_client[:"group.instance.id"] = options[:group_instance_id] if options[:group_instance_id].present?
+ @consumer = kafka_client.consumer
  end

- trap("TERM") do
- @topic_consumer.try(:stop)
- @topic_consumer = nil
- @queue_consumer.try(:stop)
- @queue_consumer = nil
- end
-
- def raw_publish(commit, body, options)
- producer.produce(encode_body(options[:headers], body), options)
- producer.deliver_messages if commit
- logger.info("Published to topic(#{options[:topic]}), msg(#{payload_log(body.inspect)})")
+ def raw_publish(wait, body, options)
+ options[:payload] = encode_body(options[:headers], body)
+ producer.produce(options).tap do |handle|
+ handle.wait if wait
+ logger.info("Published to topic(#{options[:topic]}), msg(#{payload_log(body.inspect)})")
+ end
  end

  def queue_for_publish(options)
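raw_publish now returns rdkafka's delivery handle rather than buffering through ruby-kafka's deliver_messages; waiting on the handle blocks until the broker acknowledges the message. A sketch of the underlying pattern, assuming a producer as above (topic and payloads illustrative):

    handle = producer.produce(:topic => "my.topic", :payload => "one message")
    handle.wait # blocks until delivered, raises on delivery failure

    # Publish a batch without waiting, then wait on all handles at once,
    # which is what publish_messages_impl does later in this diff:
    handles = ["a", "b"].collect { |m| producer.produce(:topic => "my.topic", :payload => m) }
    handles.each(&:wait)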
@@ -68,8 +48,8 @@ module ManageIQ

  def for_publish(options)
  kafka_opts = {:topic => address(options)}
- kafka_opts[:partition_key] = options[:group_name] if options[:group_name]
- kafka_opts[:headers] = {}
+ kafka_opts[:partition_key] = options[:group_name] if options[:group_name]
+ kafka_opts[:headers] = {}
  kafka_opts[:headers][:sender] = options[:sender] if options[:sender]

  body = options[:payload] || ''
@@ -85,23 +65,30 @@ module ManageIQ
  end
  end

- def process_queue_message(queue, message)
- payload = decode_body(message.headers, message.value)
- sender, message_type, class_name = parse_message_headers(message.headers)
- client_headers = message.headers.except(*message_header_keys)
+ def process_queue_message(queue_consumer, queue, message)
+ begin
+ payload = decode_body(message.headers, message.payload)
+ sender, message_type, _class_name = parse_message_headers(message.headers)
+ client_headers = message.headers.except(*message_header_keys).with_indifferent_access

- logger.info("Message received: queue(#{queue}), message(#{payload_log(payload)}), sender(#{sender}), type(#{message_type})")
- [sender, message_type, class_name, payload, client_headers]
+ logger.info("Message received: queue(#{queue}), message(#{payload_log(payload)}), sender(#{sender}), type(#{message_type})")
+ yield [ManageIQ::Messaging::ReceivedMessage.new(sender, message_type, payload, client_headers, queue_consumer, self)]
+ logger.info("Messsage processed")
+ rescue StandardError => e
+ logger.error("Message processing error: #{e.message}")
+ logger.error(e.backtrace.join("\n"))
+ raise
+ end
  end

- def process_topic_message(topic, message)
+ def process_topic_message(topic_consumer, topic, message)
  begin
- payload = decode_body(message.headers, message.value)
+ payload = decode_body(message.headers, message.payload)
  sender, event_type = parse_event_headers(message.headers)
- client_headers = message.headers.except(*event_header_keys)
+ client_headers = message.headers.except(*event_header_keys).with_indifferent_access

  logger.info("Event received: topic(#{topic}), event(#{payload_log(payload)}), sender(#{sender}), type(#{event_type})")
- yield ManageIQ::Messaging::ReceivedMessage.new(sender, event_type, payload, client_headers, message, self)
+ yield ManageIQ::Messaging::ReceivedMessage.new(sender, event_type, payload, client_headers, topic_consumer, self)
  logger.info("Event processed")
  rescue StandardError => e
  logger.error("Event processing error: #{e.message}")
@@ -111,7 +98,7 @@ module ManageIQ
  end

  def message_header_keys
- ['sender', 'message_type', 'class_name']
+ [:sender, :message_type, :class_name]
  end

  def parse_message_headers(headers)
@@ -120,12 +107,12 @@ module ManageIQ
  end

  def event_header_keys
- ['sender', 'event_type']
+ [:sender, :event_type]
  end

  def parse_event_headers(headers)
  return [nil, nil] unless headers.kind_of?(Hash)
- headers.values_at('sender', 'event_type')
+ headers.values_at(*event_header_keys)
  end
  end
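Header keys become symbols because rdkafka (at this version) yields consumer message headers with symbol keys; the added with_indifferent_access keeps callers that index headers by string working. A self-contained ActiveSupport sketch (header values illustrative):

    require "active_support/core_ext/hash/except"
    require "active_support/core_ext/hash/indifferent_access"

    headers = {:sender => "svc1", :message_type => "do_work", :trace_id => "abc"}
    client_headers = headers.except(:sender, :message_type, :class_name).with_indifferent_access

    client_headers[:trace_id]  # => "abc"
    client_headers["trace_id"] # => "abc", string lookups still work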
@@ -2,6 +2,8 @@ module ManageIQ
  module Messaging
  module Kafka
  module Queue
+ GROUP_FOR_QUEUE_MESSAGES = ENV['QUEUE_MESSAGES_GROUP_PREFIX'].freeze || 'manageiq_messaging_queue_group_'.freeze
+
  private

  def publish_message_impl(options)
@@ -10,35 +12,18 @@ module ManageIQ
  end

  def publish_messages_impl(messages)
- messages.each { |msg_options| raw_publish(false, *queue_for_publish(msg_options)) }
- producer.deliver_messages
+ handles = messages.collect { |msg_options| raw_publish(false, *queue_for_publish(msg_options)) }
+ handles.each(&:wait)
  end

- def subscribe_messages_impl(options)
+ def subscribe_messages_impl(options, &block)
  topic = address(options)
- session_timeout = options[:session_timeout]
-
- batch_options = {}
- batch_options[:automatically_mark_as_processed] = auto_ack?(options)
- batch_options[:max_bytes] = options[:max_bytes] if options.key?(:max_bytes)
-
- consumer = queue_consumer(topic, session_timeout)
- consumer.subscribe(topic)
- consumer.each_batch(batch_options) do |batch|
- logger.info("Batch message received: queue(#{topic})")
- begin
- messages = batch.messages.collect do |message|
- sender, message_type, _class_name, payload = process_queue_message(topic, message)
- ManageIQ::Messaging::ReceivedMessage.new(sender, message_type, payload, headers, message, self)
- end
+ options[:persist_ref] = GROUP_FOR_QUEUE_MESSAGES + topic

- yield messages
- rescue StandardError => e
- logger.error("Event processing error: #{e.message}")
- logger.error(e.backtrace.join("\n"))
- raise
- end
- logger.info("Batch message processed")
+ queue_consumer = consumer(true, options)
+ queue_consumer.subscribe(topic)
+ queue_consumer.each do |message|
+ process_queue_message(queue_consumer, topic, message, &block)
  end
  end
  end
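Putting the queue path together: subscribe_messages_impl now derives the consumer group from GROUP_FOR_QUEUE_MESSAGES plus the topic, and yields messages one at a time (each wrapped in a one-element array) instead of ruby-kafka's batches. A hedged usage sketch; the service name, broker, and handler are illustrative, and ReceivedMessage#ack is assumed to commit offsets via Client#ack:

    client = ManageIQ::Messaging::Client.open(:protocol => :Kafka, :host => "localhost", :port => 9092)
    client.subscribe_messages(:service => "my_service", :auto_ack => false) do |messages|
      messages.each do |msg|
        puts "#{msg.sender} -> #{msg.message_type}: #{msg.payload.inspect}"
        msg.ack # assumed helper delegating to Client#ack(ack_ref)
      end
    end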
@@ -1,7 +1,11 @@
+ require 'socket'
+
  module ManageIQ
  module Messaging
  module Kafka
  module Topic
+ GROUP_FOR_ADHOC_LISTENERS = Socket.gethostname.freeze
+
  private

  def publish_topic_impl(options)
@@ -10,19 +14,12 @@ module ManageIQ

  def subscribe_topic_impl(options, &block)
  topic = address(options)
- persist_ref = options[:persist_ref]
- session_timeout = options[:session_timeout]

- if persist_ref
- consumer = topic_consumer(persist_ref, session_timeout)
- consumer.subscribe(topic, :start_from_beginning => false)
- consumer.each_message(:automatically_mark_as_processed => auto_ack?(options)) do |message|
- process_topic_message(topic, message, &block)
- end
- else
- kafka_client.each_message(:topic => topic, :start_from_beginning => false) do |message|
- process_topic_message(topic, message, &block)
- end
+ options[:persist_ref] = "#{GROUP_FOR_ADHOC_LISTENERS}_#{Time.now.to_i}" unless options[:persist_ref]
+ topic_consumer = consumer(false, options)
+ topic_consumer.subscribe(topic)
+ topic_consumer.each do |message|
+ process_topic_message(topic_consumer, topic, message, &block)
  end
  end
  end
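Topic subscription now always goes through a consumer group: a stable :persist_ref resumes from that group's committed offsets, while omitting it falls back to a throwaway hostname-plus-timestamp group id, mirroring the old ad hoc listener. A usage sketch (service and persist_ref illustrative, client as opened earlier):

    client.subscribe_topic(:service => "events", :persist_ref => "event_handler_1") do |event|
      puts "#{event.sender}: #{event.message_type} #{event.payload.inspect}"
    end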
@@ -1,5 +1,5 @@
  module ManageIQ
  module Messaging
- VERSION = "0.1.7"
+ VERSION = "1.0.0"
  end
  end
@@ -4,9 +4,10 @@ $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
  require 'manageiq/messaging/version'

  Gem::Specification.new do |spec|
- spec.name = "manageiq-messaging"
- spec.version = ManageIQ::Messaging::VERSION
- spec.authors = ["ManageIQ Authors"]
+ spec.name = "manageiq-messaging"
+ spec.version = ManageIQ::Messaging::VERSION
+ spec.required_ruby_version = '>= 2.4'
+ spec.authors = ["ManageIQ Authors"]

  spec.summary = 'Client library for ManageIQ components to exchange messages through its internal message bus.'
  spec.description = 'Client library for ManageIQ components to exchange messages through its internal message bus.'
@@ -20,8 +21,8 @@ Gem::Specification.new do |spec|
  spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
  spec.require_paths = ["lib"]

- spec.add_dependency 'activesupport', '~> 5.2'
- spec.add_dependency 'ruby-kafka', '~> 1.0'
+ spec.add_dependency 'activesupport', '~> 5.2.4.3'
+ spec.add_dependency 'rdkafka', '~> 0.8'
  spec.add_dependency 'stomp', '~> 1.4.4'

  spec.add_development_dependency "bundler"
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: manageiq-messaging
  version: !ruby/object:Gem::Version
- version: 0.1.7
+ version: 1.0.0
  platform: ruby
  authors:
  - ManageIQ Authors
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2021-05-14 00:00:00.000000000 Z
+ date: 2020-09-29 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: activesupport
@@ -16,28 +16,28 @@ dependencies:
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '5.2'
+ version: 5.2.4.3
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '5.2'
+ version: 5.2.4.3
  - !ruby/object:Gem::Dependency
- name: ruby-kafka
+ name: rdkafka
  requirement: !ruby/object:Gem::Requirement
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '1.0'
+ version: '0.8'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '1.0'
+ version: '0.8'
  - !ruby/object:Gem::Dependency
  name: stomp
  requirement: !ruby/object:Gem::Requirement
@@ -208,14 +208,14 @@ required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: '0'
+ version: '2.4'
  required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.2.5
+ rubygems_version: 3.1.3
  signing_key:
  specification_version: 4
  summary: Client library for ManageIQ components to exchange messages through its internal