manageiq-messaging 0.1.0

Sign up for free to get protection for your applications and access to all features.
Files changed (41) hide show
  1. checksums.yaml +7 -0
  2. data/.codeclimate.yml +47 -0
  3. data/.gitignore +10 -0
  4. data/.rspec +2 -0
  5. data/.rubocop.yml +4 -0
  6. data/.rubocop_cc.yml +5 -0
  7. data/.rubocop_local.yml +2 -0
  8. data/.travis.yml +10 -0
  9. data/CHANGES +2 -0
  10. data/CODE_OF_CONDUCT.md +74 -0
  11. data/Gemfile +4 -0
  12. data/LICENSE.txt +21 -0
  13. data/README.md +171 -0
  14. data/Rakefile +6 -0
  15. data/bin/console +14 -0
  16. data/bin/setup +8 -0
  17. data/examples/README.md +16 -0
  18. data/examples/background_job.rb +36 -0
  19. data/examples/common.rb +40 -0
  20. data/examples/message.rb +42 -0
  21. data/lib/manageiq-messaging.rb +1 -0
  22. data/lib/manageiq/messaging.rb +24 -0
  23. data/lib/manageiq/messaging/client.rb +205 -0
  24. data/lib/manageiq/messaging/common.rb +36 -0
  25. data/lib/manageiq/messaging/kafka.rb +7 -0
  26. data/lib/manageiq/messaging/kafka/background_job.rb +13 -0
  27. data/lib/manageiq/messaging/kafka/client.rb +91 -0
  28. data/lib/manageiq/messaging/kafka/common.rb +105 -0
  29. data/lib/manageiq/messaging/kafka/queue.rb +41 -0
  30. data/lib/manageiq/messaging/kafka/topic.rb +28 -0
  31. data/lib/manageiq/messaging/null_logger.rb +11 -0
  32. data/lib/manageiq/messaging/received_message.rb +11 -0
  33. data/lib/manageiq/messaging/stomp.rb +7 -0
  34. data/lib/manageiq/messaging/stomp/background_job.rb +61 -0
  35. data/lib/manageiq/messaging/stomp/client.rb +85 -0
  36. data/lib/manageiq/messaging/stomp/common.rb +84 -0
  37. data/lib/manageiq/messaging/stomp/queue.rb +53 -0
  38. data/lib/manageiq/messaging/stomp/topic.rb +37 -0
  39. data/lib/manageiq/messaging/version.rb +5 -0
  40. data/manageiq-messaging.gemspec +33 -0
  41. metadata +210 -0
@@ -0,0 +1,105 @@
1
module ManageIQ
  module Messaging
    module Kafka
      # Shared helpers for the Kafka client: lazily-built producer/consumers
      # and translation of publish options into ruby-kafka options + headers.
      module Common
        require 'manageiq/messaging/common'
        include ManageIQ::Messaging::Common

        # All queue consumers join this group so each queue message is
        # processed by one and only one consumer.
        GROUP_FOR_QUEUE_MESSAGES = 'manageiq_messaging_queue_group'.freeze

        private

        # Lazily created ruby-kafka producer, shared by all publish calls.
        def producer
          @producer ||= kafka_client.producer
        end

        # Consumer used for topic subscriptions.
        #
        # persist_ref enables consumer to receive messages sent when consumer
        # is temporarily offline; it also enables consumers to do load
        # balancing when multiple consumers join with the same ref.
        def topic_consumer(persist_ref)
          # BUG FIX: previously this stopped the never-assigned @consumer and
          # never cleared the memoized @topic_consumer, so a changed
          # persist_ref silently kept the old consumer group. Stop and discard
          # the cached consumer when the ref changes.
          if @topic_consumer && @persist_ref != persist_ref
            @topic_consumer.try(:stop)
            @topic_consumer = nil
          end
          @persist_ref = persist_ref
          @topic_consumer ||= kafka_client.consumer(:group_id => persist_ref)
        end

        def queue_consumer
          # all queue consumers join the same group so that each message can be processed by one and only one consumer
          @queue_consumer ||= kafka_client.consumer(:group_id => GROUP_FOR_QUEUE_MESSAGES)
        end

        # NOTE(review): this trap is installed at module-definition time with
        # +self+ being the module, so instance-level consumers are not visible
        # here; stopping the module-level ivars is best-effort cleanup only.
        # Consider moving shutdown handling into the client instance.
        trap("TERM") do
          @topic_consumer.try(:stop)
          @topic_consumer = nil
          @queue_consumer.try(:stop)
          @queue_consumer = nil
        end

        # Produce +body+ to the topic named in +options+; flush the producer's
        # buffer immediately when +commit+ is true.
        def raw_publish(commit, body, options)
          producer.produce(encode_body(options[:headers], body), options)
          producer.deliver_messages if commit
          logger.info("Published to topic(#{options[:topic]}), msg(#{payload_log(body.inspect)})")
        end

        # Build [body, kafka options] for a queue publish; queue messages
        # carry :message_type and :class_name headers.
        def queue_for_publish(options)
          body, kafka_opts = for_publish(options)
          kafka_opts[:headers][:message_type] = options[:message] if options[:message]
          kafka_opts[:headers][:class_name] = options[:class_name] if options[:class_name]

          [body, kafka_opts]
        end

        # Build [body, kafka options] for a topic publish; topic messages
        # carry an :event_type header.
        def topic_for_publish(options)
          body, kafka_opts = for_publish(options)
          kafka_opts[:headers][:event_type] = options[:event] if options[:event]

          [body, kafka_opts]
        end

        # Common publish option translation: topic name, optional partition
        # key (for in-order group delivery) and sender header.
        def for_publish(options)
          kafka_opts = {:topic => address(options), :headers => {}}
          kafka_opts[:partition_key] = options[:group_name] if options[:group_name]
          kafka_opts[:headers][:sender] = options[:sender] if options[:sender]

          body = options[:payload] || ''

          [body, kafka_opts]
        end

        # Kafka topic name: "service" or "service.affinity".
        def address(options)
          if options[:affinity]
            "#{options[:service]}.#{options[:affinity]}"
          else
            options[:service]
          end
        end

        # Decode a queue message and return [sender, message_type, class_name, payload].
        def process_queue_message(queue, message)
          payload = decode_body(message.headers, message.value)
          sender, message_type, class_name = parse_message_headers(message.headers)
          logger.info("Message received: queue(#{queue}), message(#{payload_log(payload)}), sender(#{sender}), type(#{message_type})")
          [sender, message_type, class_name, payload]
        end

        # Decode a topic message and hand it to the subscriber block; errors
        # raised by the block are logged and swallowed so the consumer loop
        # keeps running.
        def process_topic_message(topic, message)
          payload = decode_body(message.headers, message.value)
          sender, event_type = parse_event_headers(message.headers)
          logger.info("Event received: topic(#{topic}), event(#{payload_log(payload)}), sender(#{sender}), type(#{event_type})")
          yield sender, event_type, payload
          logger.info("Event processed")
        rescue StandardError => e
          logger.error("Event processing error: #{e.message}")
          logger.error(e.backtrace.join("\n"))
        end

        def parse_message_headers(headers)
          return [nil, nil, nil] unless headers.kind_of?(Hash)
          headers.values_at('sender', 'message_type', 'class_name')
        end

        def parse_event_headers(headers)
          return [nil, nil] unless headers.kind_of?(Hash)
          headers.values_at('sender', 'event_type')
        end
      end
    end
  end
end
@@ -0,0 +1,41 @@
1
module ManageIQ
  module Messaging
    module Kafka
      # Queue (point-to-point) support for the Kafka client.
      module Queue
        private

        # Publish one message and flush immediately. Kafka cannot deliver a
        # per-message response, so a block (request/response style) is rejected.
        def publish_message_impl(options)
          raise ArgumentError, "Kafka messaging implementation does not take a block" if block_given?
          raw_publish(true, *queue_for_publish(options))
        end

        # Buffer every message, then deliver them all in a single flush.
        def publish_messages_impl(messages)
          messages.each { |msg_options| raw_publish(false, *queue_for_publish(msg_options)) }
          producer.deliver_messages
        end

        # Consume queue messages in batches, yielding an array of
        # ReceivedMessage per batch. Errors from the subscriber block are
        # logged and swallowed so the consumer loop keeps running.
        def subscribe_messages_impl(options)
          topic = address(options)

          consumer = queue_consumer
          consumer.subscribe(topic)
          consumer.each_batch do |batch|
            logger.info("Batch message received: queue(#{topic})")
            begin
              messages = batch.messages.collect do |message|
                sender, message_type, _class_name, payload = process_queue_message(topic, message)
                ManageIQ::Messaging::ReceivedMessage.new(sender, message_type, payload, nil)
              end

              yield messages
            rescue StandardError => e
              # Fixed log prefix: this path handles queue messages, not topic
              # events (was "Event processing error", copy-pasted from the
              # topic handler); now consistent with the Stomp queue handler.
              logger.error("Message processing error: #{e.message}")
              logger.error(e.backtrace.join("\n"))
            end
            logger.info("Batch message processed")
          end
        end
      end
    end
  end
end
@@ -0,0 +1,28 @@
1
module ManageIQ
  module Messaging
    module Kafka
      # Topic (publish/subscribe) support for the Kafka client.
      module Topic
        private

        # Publish one event to the topic and flush immediately.
        def publish_topic_impl(options)
          raw_publish(true, *topic_for_publish(options))
        end

        # Subscribe to a topic and process each message with +block+.
        # With :persist_ref a durable consumer group is used; without it an
        # anonymous, non-grouped subscription only sees new messages.
        def subscribe_topic_impl(options, &block)
          topic = address(options)
          persist_ref = options[:persist_ref]

          unless persist_ref
            kafka_client.each_message(:topic => topic, :start_from_beginning => false) do |message|
              process_topic_message(topic, message, &block)
            end
            return
          end

          group_consumer = topic_consumer(persist_ref)
          group_consumer.subscribe(topic, :start_from_beginning => false)
          group_consumer.each_message { |message| process_topic_message(topic, message, &block) }
        end
      end
    end
  end
end
@@ -0,0 +1,11 @@
1
require 'logger'

module ManageIQ
  module Messaging
    # A Logger that silently discards everything sent to it; used when the
    # client is not handed a real logger.
    class NullLogger < Logger
      # Skip Logger#initialize entirely — no log device is needed.
      def initialize(*_args)
      end

      # Every logging method funnels through #add, so swallowing it here
      # silences all severity levels at once.
      def add(*_args, &_block)
      end
    end
  end
end
@@ -0,0 +1,11 @@
1
module ManageIQ
  module Messaging
    # Value object handed to subscribers: the sender, the message type, the
    # decoded payload, and an opaque reference used to acknowledge the message.
    class ReceivedMessage
      attr_accessor :sender, :message, :payload, :ack_ref

      def initialize(sender, message, payload, ack_ref)
        @sender  = sender
        @message = message
        @payload = payload
        @ack_ref = ack_ref
      end
    end
  end
end
@@ -0,0 +1,7 @@
1
module ManageIQ
  module Messaging
    module Stomp
      # Loaded lazily on first reference to ManageIQ::Messaging::Stomp::Client.
      autoload :Client, 'manageiq/messaging/stomp/client'
    end
  end
end
@@ -0,0 +1,61 @@
1
module ManageIQ
  module Messaging
    module Stomp
      # Background-job execution over a STOMP queue: each message names a
      # class/method to invoke; the result is optionally sent back on a
      # response queue keyed by the message's correlation id.
      module BackgroundJob
        private

        # Subscribe to the job queue and run each received job. Job errors
        # and timeouts are logged and swallowed so the subscription survives.
        def subscribe_background_job_impl(options)
          queue_name, headers = queue_for_subscribe(options)

          subscribe(queue_name, headers) do |frame|
            begin
              ack(frame)
              assert_options(frame.headers, ['class_name', 'message_type'])

              job_options = decode_body(frame.headers, frame.body)
              job_options = {} if job_options.empty?
              logger.info("Processing background job: queue(#{queue_name}), job(#{job_options.inspect}), headers(#{frame.headers})")
              job_result = run_job(job_options.merge(:class_name => frame.headers['class_name'], :method_name => frame.headers['message_type']))
              logger.info("Background job completed")

              correlation_ref = frame.headers['correlation_id']
              send_response(options[:service], correlation_ref, job_result) if correlation_ref
            rescue Timeout::Error
              logger.warn("Background job timed out")
              # A timed-out job may leave the DB connection in a bad state;
              # reconnect if ActiveRecord is loaded.
              if Object.const_defined?('ActiveRecord::Base')
                begin
                  logger.info("Reconnecting to DB after timeout error during queue deliver")
                  ActiveRecord::Base.connection.reconnect!
                rescue => err
                  logger.error("Error encountered during <ActiveRecord::Base.connection.reconnect!> error:#{err.class.name}: #{err.message}")
                end
              end
            rescue => error
              logger.error("Background job error: #{error.message}")
              logger.error(error.backtrace.join("\n"))
            end
          end
        end

        # Resolve the target (class, or instance via :instance_id), invoke
        # :method_name with :args under a timeout, then recursively run any
        # :miq_callback job. Returns the invocation's result.
        def run_job(options)
          assert_options(options, [:class_name, :method_name])

          instance_id  = options[:instance_id]
          args         = options[:args]
          miq_callback = options[:miq_callback]

          receiver = Object.const_get(options[:class_name])
          receiver = receiver.find(instance_id) if instance_id

          msg_timeout = 600 # TODO: configurable per message
          job_result = Timeout.timeout(msg_timeout) do
            receiver.send(options[:method_name], *args)
          end

          run_job(miq_callback) if miq_callback
          job_result
        end
      end
    end
  end
end
@@ -0,0 +1,85 @@
1
module ManageIQ
  module Messaging
    module Stomp
      # Messaging client implementation using the Stomp protocol with
      # ActiveMQ Artemis as the underlying broker.
      # Do not instantiate this class directly; use
      # +ManageIQ::Messaging::Client.open+ instead.
      #
      # Artemis specific connection options accepted by +open+:
      # * :client_ref (A reference string to identify the client)
      # * :host (Single host name)
      # * :port (host port number)
      # * :username
      # * :password
      # * :heartbeat (Whether the client should do heart-beating. Default to true)
      #
      # Artemis specific +publish_message+ options:
      # * :expires_on
      # * :deliver_on
      # * :priority
      # * :group_name
      #
      # Artemis specific +publish_topic+ options:
      # * :expires_on
      # * :deliver_on
      # * :priority
      #
      # Artemis specific +subscribe_topic+ options:
      # * :persist_ref
      #
      # +:persist_ref+ must be paired with the +:client_ref+ option of
      # +Client.open+; together they form a unique group name. Without a
      # group every topic subscriber receives a copy of each message only
      # while active (the default). When multiple subscribers share a group,
      # each message goes to exactly one of them (load balancing), and
      # messages sent while all members are offline are persisted and
      # delivered once any member returns. Messages are still copied to
      # subscribers in other groups or in no group.
      #
      # Artemis specific +subscribe_messages+ options:
      # * :limit ()
      class Client < ManageIQ::Messaging::Client
        require 'stomp'
        require 'manageiq/messaging/stomp/common'
        require 'manageiq/messaging/stomp/queue'
        require 'manageiq/messaging/stomp/background_job'
        require 'manageiq/messaging/stomp/topic'

        include Common
        include Queue
        include BackgroundJob
        include Topic

        private *delegate(:subscribe, :unsubscribe, :publish, :to => :stomp_client)
        delegate :ack, :close, :to => :stomp_client

        attr_accessor :encoding

        private

        attr_reader :stomp_client

        def initialize(options)
          broker = options.slice(:host, :port)
          broker[:passcode] = options[:password] if options[:password]
          broker[:login] = options[:username] if options[:username]

          connect_headers = {}
          # Heart-beating is on unless :heartbeat is explicitly false.
          if options[:heartbeat] != false
            connect_headers.merge!(
              :host => options[:host],
              :"accept-version" => "1.2",
              :"heart-beat" => "2000,0"
            )
          end
          connect_headers[:"client-id"] = options[:client_ref] if options[:client_ref]

          @encoding = options[:encoding] || 'yaml'
          require "json" if @encoding == "json"
          @stomp_client = ::Stomp::Client.new(:hosts => [broker], :connect_headers => connect_headers)
        end
      end
    end
  end
end
@@ -0,0 +1,84 @@
1
module ManageIQ
  module Messaging
    module Stomp
      # Shared helpers for the STOMP client: address/header construction for
      # queues and topics, and the request/response plumbing.
      module Common
        require 'manageiq/messaging/common'
        include ManageIQ::Messaging::Common

        private

        # Encode +body+ and publish it to +address+ with the given headers.
        def raw_publish(address, body, headers)
          publish(address, encode_body(headers, body), headers)
          logger.info("Published to address(#{address}), msg(#{payload_log(body.inspect)}), headers(#{headers.inspect})")
        end

        # Build [address, headers] for publishing to a queue. Artemis-specific
        # headers carry expiration, scheduled delivery, priority and grouping.
        def queue_for_publish(options)
          address = "queue/#{options[:service]}.#{options[:affinity] || 'none'}"

          headers = {:"destination-type" => 'ANYCAST', :persistent => true}
          headers[:expires] = options[:expires_on].to_i * 1000 if options[:expires_on]
          headers[:AMQ_SCHEDULED_TIME] = options[:deliver_on].to_i * 1000 if options[:deliver_on]
          headers[:priority] = options[:priority] if options[:priority]
          headers[:_AMQ_GROUP_ID] = options[:group_name] if options[:group_name]

          [address, headers]
        end

        # Build [queue name, headers] for subscribing to a queue; client-side
        # acknowledgement so messages survive subscriber crashes.
        def queue_for_subscribe(options)
          queue_name = "queue/#{options[:service]}.#{options[:affinity] || 'none'}"

          headers = {:"subscription-type" => 'ANYCAST', :ack => 'client'}

          [queue_name, headers]
        end

        # Build [address, headers] for publishing to a topic.
        def topic_for_publish(options)
          address = "topic/#{options[:service]}"

          headers = {:"destination-type" => 'MULTICAST', :persistent => true}
          headers[:expires] = options[:expires_on].to_i * 1000 if options[:expires_on]
          headers[:AMQ_SCHEDULED_TIME] = options[:deliver_on].to_i * 1000 if options[:deliver_on]
          headers[:priority] = options[:priority] if options[:priority]

          [address, headers]
        end

        # Build [topic name, headers] for subscribing to a topic; a
        # :persist_ref makes the subscription durable.
        def topic_for_subscribe(options)
          queue_name = "topic/#{options[:service]}"

          headers = {:"subscription-type" => 'MULTICAST', :ack => 'client'}
          headers[:"durable-subscription-name"] = options[:persist_ref] if options[:persist_ref]

          [queue_name, headers]
        end

        # Publish +result+ to the per-request response queue, using the
        # correlation ref as the queue affinity.
        def send_response(service, correlation_ref, result)
          address, response_headers = queue_for_publish(
            :service  => "#{service}.response",
            :affinity => correlation_ref
          )
          raw_publish(address, result || '', response_headers.merge(:correlation_id => correlation_ref))
        end

        # Subscribe to the per-request response queue and yield the decoded
        # response once; the subscription is always removed afterwards.
        def receive_response(service, correlation_ref)
          queue_name, response_headers = queue_for_subscribe(
            :service  => "#{service}.response",
            :affinity => correlation_ref
          )
          subscribe(queue_name, response_headers) do |frame|
            ack(frame)
            begin
              yield decode_body(frame.headers, frame.body)
            ensure
              unsubscribe(queue_name)
            end
          end
        end
      end
    end
  end
end
@@ -0,0 +1,53 @@
1
module ManageIQ
  module Messaging
    module Stomp
      # Queue (point-to-point) support for the STOMP client, including
      # request/response via correlation ids.
      module Queue
        private

        # Publish one message. When a block is given a correlation id is
        # attached and the block receives the eventual response.
        def publish_message_impl(options, &block)
          address, headers = queue_for_publish(options)
          headers[:sender] = options[:sender] if options[:sender]
          headers[:message_type] = options[:message] if options[:message]
          headers[:class_name] = options[:class_name] if options[:class_name]
          headers[:correlation_id] = Time.now.to_i.to_s if block_given?

          raw_publish(address, options[:payload] || '', headers)

          return unless block_given?

          receive_response(options[:service], headers[:correlation_id], &block)
        end

        # STOMP has no batch produce; publish each message individually.
        def publish_messages_impl(messages)
          messages.each { |msg_options| publish_message(msg_options) }
        end

        # Subscribe to a queue and yield each message (as a one-element array
        # of ReceivedMessage). If the message carries a correlation id, the
        # block's result is sent back on the response queue. Errors from the
        # block are logged and swallowed so the subscription keeps running.
        def subscribe_messages_impl(options)
          queue_name, headers = queue_for_subscribe(options)

          # for STOMP we can get message one at a time
          subscribe(queue_name, headers) do |frame|
            begin
              sender       = frame.headers['sender']
              message_type = frame.headers['message_type']
              message_body = decode_body(frame.headers, frame.body)
              logger.info("Message received: queue(#{queue_name}), msg(#{payload_log(message_body)}), headers(#{frame.headers})")

              result = yield [ManageIQ::Messaging::ReceivedMessage.new(sender, message_type, message_body, frame)]
              logger.info("Message processed")

              correlation_ref = frame.headers['correlation_id']
              if correlation_ref
                result = result.first if result.kind_of?(Array)
                send_response(options[:service], correlation_ref, result)
              end
            rescue => error
              logger.error("Message processing error: #{error.message}")
              logger.error(error.backtrace.join("\n"))
            end
          end
        end
      end
    end
  end
end