fourkites-sqspoller-v2 1.0.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/.gitignore +9 -0
- data/.ruby-gemset +1 -0
- data/.ruby-version +1 -0
- data/CODE_OF_CONDUCT.md +13 -0
- data/Gemfile +4 -0
- data/LICENSE +340 -0
- data/README.md +72 -0
- data/Rakefile +1 -0
- data/bin/run_sqs_poller +5 -0
- data/bin/sqs_poller.service +13 -0
- data/bin/sqs_poller_v2 +18 -0
- data/images/SQS_Poller_V2.png +0 -0
- data/install.sh +10 -0
- data/lib/config/newrelic.yml +71 -0
- data/lib/seahorse/stringio.rb +10 -0
- data/lib/sqs-ruby-example-consume-queue.rb +16 -0
- data/lib/sqs-ruby-example-create-queue.rb +107 -0
- data/lib/sqspoller/common/ring_buffer.rb +74 -0
- data/lib/sqspoller/common/utils.rb +47 -0
- data/lib/sqspoller/logger/logger.rb +40 -0
- data/lib/sqspoller/metrics/log_reporter.rb +19 -0
- data/lib/sqspoller/metrics/queue_stats_reporter.rb +42 -0
- data/lib/sqspoller/metrics/sqs_poller_metrics.rb +109 -0
- data/lib/sqspoller/poll/queue_controller.rb +141 -0
- data/lib/sqspoller/poll/queue_poller.rb +73 -0
- data/lib/sqspoller/process/message_handler.rb +53 -0
- data/lib/sqspoller/process/task_finalizer.rb +88 -0
- data/lib/sqspoller/process/task_worker.rb +50 -0
- data/lib/sqspoller/process/worker_controller.rb +81 -0
- data/lib/sqspoller/sqs_poller.rb +147 -0
- data/lib/sqspoller/version.rb +3 -0
- data/lib/sqspoller/worker_task.rb +17 -0
- data/lib/sqspoller.rb +7 -0
- data/lib/test.yaml +15 -0
- data/sqspoller.gemspec +30 -0
- metadata +180 -0
@@ -0,0 +1,107 @@
|
|
1
|
+
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
2
|
+
# SPDX - License - Identifier: Apache - 2.0
|
3
|
+
|
4
|
+
# Purpose
|
5
|
+
|
6
|
+
# This code example demonstrates how to create a queue in Amazon Simple Queue Service (Amazon SQS).
|
7
|
+
|
8
|
+
require 'aws-sdk-sqs'
|
9
|
+
require "net/http"
|
10
|
+
require "rest-client"
|
11
|
+
require "json"
|
12
|
+
require 'concurrent'
|
13
|
+
|
14
|
+
# @param sqs_client [Aws::SQS::Client] An initialized Amazon SQS client.
|
15
|
+
# @param queue_name [String] The name of the queue.
|
16
|
+
# @return [Boolean] true if the queue was created; otherwise, false.
|
17
|
+
# @example
|
18
|
+
# exit 1 unless queue_created?(
|
19
|
+
# Aws::SQS::Client.new(region: 'us-west-2'),
|
20
|
+
# 'my-queue'
|
21
|
+
# )
|
22
|
+
# Shared HTTP headers for the RestClient example call (used only by the
# commented-out block inside run_me).
HEADERS = {
  'Content-Type' => 'application/json',
  'Accept' => 'application/json'
}
|
26
|
+
|
27
|
+
# Creates the named SQS queue (idempotent for an existing queue).
#
# @param sqs_client [Aws::SQS::Client] an initialized Amazon SQS client
# @param queue_name [String] the name of the queue to create
# @return [Boolean] true when the CreateQueue call succeeded, false otherwise
def queue_created?(sqs_client, queue_name)
  sqs_client.create_queue(queue_name: queue_name)
  true
rescue StandardError => error
  puts "Error creating queue: #{error.message}"
  false
end
|
34
|
+
|
35
|
+
# Sends one batch of messages to the queue.
#
# @param sqs_client [Aws::SQS::Client] an initialized Amazon SQS client
# @param queue_url [String] the URL of the target queue
# @param entries [Array<Hash>] SendMessageBatch entries (id + message_body)
# @return [Boolean] true when the batch call succeeded, false otherwise
def messages_sent?(sqs_client, queue_url, entries)
  sqs_client.send_message_batch(queue_url: queue_url, entries: entries)
  true
rescue StandardError => error
  puts "Error sending messages: #{error.message}"
  false
end
|
45
|
+
|
46
|
+
# Queues one batch-send onto the thread pool.
#
# Fix: the original printed "Messages Sent" unconditionally, even when
# messages_sent? had returned false (its own error message already printed).
# Now the success line is printed only when the batch actually went through.
#
# @param connection_pool [Concurrent::RubyThreadPoolExecutor] pool to post to
# @param messages [Array<Hash>] SendMessageBatch entries for one batch
# @param queue_url [String] the URL of the target queue
# @param sqs_client [Aws::SQS::Client] an initialized Amazon SQS client
def submit_task(connection_pool, messages, queue_url, sqs_client)
  connection_pool.post do
    puts "Messages Sent" if messages_sent?(sqs_client, queue_url, messages)
  end
end
|
52
|
+
|
53
|
+
# Full example call:
|
54
|
+
# Replace us-west-2 with the AWS Region you're using for Amazon SQS.
|
55
|
+
# Demo driver: creates the queue, then sends 10_000 messages in batches of 10
# through a bounded thread pool.
# Fixes: (1) SecureRandom was used without being required anywhere in this
# file; (2) on queue-creation failure the original fell through to
# get_queue_url, which raises for a nonexistent queue — now we return early.
def run_me
  require 'securerandom' # Fix: SecureRandom.uuid was used without a require.

  region = 'us-west-2'
  queue_name = 'my-queue'
  sqs_client = Aws::SQS::Client.new(region: region)

  puts "Creating the queue named '#{queue_name}'..."

  if queue_created?(sqs_client, queue_name)
    puts 'Queue created.'
  else
    puts 'Queue not created.'
    # Fix: bail out instead of calling get_queue_url on a missing queue.
    return
  end

  queue_url = sqs_client.get_queue_url(queue_name: queue_name).queue_url
  messages = []
  # Up to 20 concurrent send_message_batch calls; pending batches wait in the queue.
  connection_pool = Concurrent::RubyThreadPoolExecutor.new(max_threads: 20, min_threads: 1, max_queue: 100000)
  10000.times do |index|
    puts "hello - #{index} - #{index % 10}"
    body = {
      'MessageType' => 'PROCESS_TRUCK_LOCATION',
      'Source' => "#{index} SQS updates",
      'Content' => {
        'TimeZone' => 'UTC'
      }
    }
    msg = {
      id: SecureRandom.uuid,
      message_body: body.to_json
    }
=begin
    RestClient::Request.execute(:method => :post, :url => "http://localhost:5001/process", :payload => body.to_json, :headers => HEADERS, :timeout => 10, :open_timeout => 5) do |response, request, result|
      process_http_response response
    end
=end
    messages.push(msg)
    # Flush every 10 messages — SQS allows at most 10 entries per batch request.
    if index % 10 == 9
      submit_task(connection_pool, messages.clone, queue_url, sqs_client)
      messages = []
    end
  end
  connection_pool.shutdown
  connection_pool.wait_for_termination
end
|
97
|
+
|
98
|
+
# Validates an HTTP response object.
#
# @param response [#code] anything exposing a numeric status code
# @return [String] "OK" when the status code is 200
# @raise [RuntimeError] for any non-200 status code
def process_http_response(response)
  if response.code == 200
    "OK"
  else
    raise "Service did not return 200 OK response. #{response.code}"
  end
end
|
106
|
+
|
107
|
+
# Run the example only when this file is executed directly (not when required).
run_me if $PROGRAM_NAME == __FILE__
|
@@ -0,0 +1,74 @@
|
|
1
|
+
module SqsPoller
  module Common
    # Fixed-capacity circular buffer. push/shift/flush/clear are guarded by a
    # mutex; when full, a push silently overwrites the oldest element.
    class RingBuffer

      # @param size [Integer] maximum number of elements retained
      def initialize(size)
        @size = size
        @start = 0
        @count = 0
        @buffer = Array.new(size)
        @mutex = Mutex.new
      end

      # true when the buffer holds exactly @size elements
      def full?
        @count == @size
      end

      # number of elements currently stored
      def count
        @count
      end

      def empty?
        @count == 0
      end

      # Appends value; when full, drops the oldest element to make room.
      # @return the pushed value
      def push(value)
        @mutex.synchronize do
          stop = (@start + @count) % @size
          @buffer[stop] = value
          if full?
            @start = (@start + 1) % @size
          else
            @count += 1
          end
          value
        end
      end

      alias :<< :push

      # Removes and returns the oldest element, or nil when empty.
      def shift
        @mutex.synchronize do
          remove_element
        end
      end

      # Drains the buffer under one lock, returning all elements oldest-first.
      def flush
        values = []
        @mutex.synchronize do
          until empty?
            values << remove_element
          end
        end
        values
      end

      # Empties the buffer.
      # Fix: now synchronized like the other mutating operations, so a
      # concurrent push/shift cannot observe a half-reset @start/@count pair.
      def clear
        @mutex.synchronize do
          @buffer = Array.new(@size)
          @start = 0
          @count = 0
        end
      end

      private

      # Pops the oldest element; caller must hold @mutex.
      def remove_element
        return nil if empty?
        value, @buffer[@start] = @buffer[@start], nil
        @start = (@start + 1) % @size
        @count -= 1
        value
      end

    end
  end
end
|
@@ -0,0 +1,47 @@
|
|
1
|
+
require "logger"
|
2
|
+
require "concurrent"
|
3
|
+
require "net/http"
|
4
|
+
require "aws-sdk"
|
5
|
+
|
6
|
+
module SqsPoller
  module Common
    # Small wall-clock helpers shared across the poller.
    class Utils
      class << self

        # @return [Integer] current wall-clock time in milliseconds since epoch
        def get_current_time_in_millis
          (Time.now.to_f * 1000).to_i
        end

        # Convenience factory: a Timer that starts counting immediately.
        # @return [SqsPoller::Common::Utils::Timer]
        def start_timer
          Timer.new
        end

      end

      # Measures elapsed wall-clock time in milliseconds.
      class Timer
        attr_reader :start_time, :stop_time

        def initialize
          start
        end

        # (Re)starts the timer at the current time.
        def start
          @start_time = Utils.get_current_time_in_millis
        end

        # Stops the timer.
        # @return [Integer] elapsed milliseconds since start
        def stop
          @stop_time = Utils.get_current_time_in_millis
          @stop_time - @start_time
        end
      end
    end
  end
end
|
@@ -0,0 +1,40 @@
|
|
1
|
+
require 'logger'
|
2
|
+
|
3
|
+
module SqsPoller
  # Factory for ::Logger instances with a module-wide default level and an
  # optional shared output file.
  # Fix: removed the bare `@log_level` line, a no-op expression statement.
  module Logger
    class << self

      # NOTE(review): these assignments execute with self = the singleton
      # class, so they set ivars on the singleton class while the methods
      # below read ivars on the module itself. They are effectively no-ops
      # (uninitialized ivars read as nil anyway); kept to document intent.
      @logger_file = nil
      @logger = nil

      # Builds a fresh ::Logger writing to the configured file (STDOUT when
      # none was set). Level precedence: explicit argument > module default
      # (set_log_level) > ::Logger::ERROR.
      # @param prog_name [String, nil] progname stamped on each log line
      # @param log_level [Integer, nil] per-logger level override
      # @return [::Logger]
      def get_new_logger(prog_name = nil, log_level = nil)
        logger =
          if @logger_file.nil?
            ::Logger.new(STDOUT)
          else
            ::Logger.new(@logger_file)
          end
        @log_level ||= ::Logger::ERROR
        logger.level = log_level || @log_level
        logger.progname = prog_name
        logger
      end

      # Sets the module-wide default log level for future loggers.
      def set_log_level(log_level)
        @log_level = log_level
      end

      # Routes subsequently created loggers to the given file or IO.
      def set_logger_file(logger_file)
        @logger_file = logger_file
      end

      # Memoized shared logger.
      # @return [::Logger]
      def get_logger
        @logger ||= get_new_logger
      end

    end
  end
end
|
@@ -0,0 +1,19 @@
|
|
1
|
+
require 'ruby-metrics'
|
2
|
+
require 'sqspoller/logger/logger'
|
3
|
+
|
4
|
+
# Report local metrics in logs.
|
5
|
+
module SqsPoller
  module Metrics
    # Reports ruby-metrics agent snapshots into the application log.
    class LogReporter

      # @param options [Hash] unused; accepted for reporter-interface parity
      #   with other reporters registered on the metrics agent.
      def initialize(options = {})
        @logger = SqsPoller::Logger.get_new_logger(self.class.name)
      end

      # Called periodically by the metrics agent (see report_periodically).
      # NOTE(review): if agent.to_json already returns a JSON *string*,
      # JSON.pretty_generate will re-encode it as a quoted string literal —
      # confirm ruby-metrics' Agent#to_json return type.
      def report(agent)
        @logger.info(JSON.pretty_generate(agent.to_json))
      end

    end
  end
end
|
@@ -0,0 +1,42 @@
|
|
1
|
+
require 'sqspoller/logger/logger'
|
2
|
+
|
3
|
+
# This reports the Aws::SQS::QueuePoller stats for each queue and each worker in the log.
|
4
|
+
module SqsPoller
  module Metrics
    # Periodically logs Aws::SQS::QueuePoller statistics for every poller
    # owned by the queue controller. Spawns one background thread in
    # initialize; stop lets that thread exit after its current sleep.
    class SqsStatsReporter
      # Default reporting delay is 60 seconds
      DEFAULT_REPORTING_DELAY = 60

      # Signals the reporting thread to exit after its current sleep interval.
      def stop
        @running = false
      end

      # @param options [Hash] :queue_controller (required) must expose
      #   #pollers (hash of queue name => array of pollers); :delay overrides
      #   the reporting interval in seconds.
      # @raise [RuntimeError] when :queue_controller is missing
      def initialize(options = {})
        @running = true
        @logger = SqsPoller::Logger.get_new_logger(self.class.name)

        queue_controller = options[:queue_controller]
        # Idiom fix: was `options[:queue_controller] == nil`.
        raise "Need an agent to report data from" if queue_controller.nil?

        delay = options[:delay] || DEFAULT_REPORTING_DELAY

        Thread.new do
          while @running
            sleep delay
            # Idiom fix: was `pollers.size == 0`.
            next if queue_controller.pollers.empty?
            queue_controller.pollers.each do |queue_name, pollers|
              pollers.each.with_index(1) do |poller, index|
                stats = poller.get_poller_stats
                # nil until the poller has made its first request.
                next if stats.nil?
                @logger.info("Queue: #{queue_name}, Worker: #{index} started: #{stats.polling_started_at}, requests: #{stats.request_count}, messages: #{stats.received_message_count}, last-timestamp: #{stats.last_message_received_at}")
              end
            end
          end
        end
      end

    end
  end
end
|
@@ -0,0 +1,109 @@
|
|
1
|
+
require 'ruby-metrics'
|
2
|
+
require 'new_relic/agent'
|
3
|
+
require 'sqspoller/metrics/log_reporter'
|
4
|
+
|
5
|
+
# collect the metrics around queue, message_type and message count.
|
6
|
+
module SqsPoller
  # Records per-queue / per-message-type timers and counters, locally via
  # ruby-metrics and optionally to New Relic as custom metrics.
  module Metrics

    @new_relic_metrics_enabled = true
    @duration_units = :milliseconds
    @agent = nil

    UNKNOWN_MESSAGE_TYPE = "UNKNOWN"
    TASK_SUCCESS_STATUS = "success"
    TASK_FAILURE_STATUS = "failed"
    NEW_RELIC_CUSTOM_METRICS_PATH = "Custom/sqspoller/"

    # Extracts the MessageType field from the task's JSON message body.
    # Returns UNKNOWN_MESSAGE_TYPE when the body is not parsable JSON or has
    # no MessageType key.
    # @param task [Hash] expects :message responding to #body
    def self.get_message_type(task)
      message_type = UNKNOWN_MESSAGE_TYPE
      begin
        parsed_message = JSON.parse(task[:message].body, symbolize_names: true)
        message_type = parsed_message[:MessageType] unless parsed_message[:MessageType].nil?
      rescue StandardError
        # Fix: was `rescue Exception`, which also swallows SignalException /
        # SystemExit. Unparsable or untyped messages simply stay UNKNOWN.
      end
      message_type
    end

    # Maps a boolean task status to its metric label.
    def self.get_message_status(task_status)
      task_status ? TASK_SUCCESS_STATUS : TASK_FAILURE_STATUS
    end

    def self.get_queue_name(task)
      task[:queue_name]
    end

    # @return [::Metrics::Agent, nil] the active agent, nil before start.
    def self.get_metrics_recorder
      @agent
    end

    def self.get_qps
      @agent.timer(:total_messages)
    end

    # NOTE(review): this computes queue_time - timer.start_time; if the timer
    # starts when processing begins this is negative — confirm intended sign
    # against the caller in task_worker.
    def self.get_queue_wait_time(task, timer)
      queue_time = task[:queue_time]
      start_time = timer.start_time
      queue_time - start_time
    end

    # Records all metrics for one completed task. No-op until
    # start_metrics_agent has been called.
    def self.record(task, task_status, timer, elapsed_time)
      return if @agent.nil?
      queue_name = get_queue_name(task)
      message_type = get_message_type(task)
      message_status = get_message_status(task_status)
      queue_wait_time = get_queue_wait_time(task, timer)
      local_metrics(elapsed_time, message_status, message_type, queue_name, queue_wait_time)

      new_relic_metrics(elapsed_time, message_status, message_type, queue_name, queue_wait_time) if @new_relic_metrics_enabled
    end

    def self.enable_new_relic_metrics
      @new_relic_metrics_enabled = true
    end

    def self.disable_new_relic_metrics
      @new_relic_metrics_enabled = false
    end

    # NOTE(review): a bare `private` has no effect on methods defined with
    # `def self.`; the helpers below are in fact public. Left unchanged to
    # avoid breaking any external caller (e.g. of start_metrics_agent).
    private

    # Updates the local ruby-metrics timers and counters for one task.
    def self.local_metrics(elapsed_time, message_status, message_type, queue_name, queue_wait_time)
      @agent.timer(:total_messages, @duration_units).update(elapsed_time, @duration_units)
      @agent.timer(queue_name.to_sym, @duration_units).update(elapsed_time, @duration_units)
      @agent.timer("#{queue_name}_#{message_type}".to_sym, @duration_units).update(elapsed_time, @duration_units)
      @agent.timer("#{queue_name}_#{message_type}_wait_time".to_sym, @duration_units).update(queue_wait_time, @duration_units)

      @agent.counter("#{message_status}".to_sym).inc
      @agent.counter("#{queue_name}_count".to_sym).inc
      @agent.counter("#{queue_name}_#{message_status}".to_sym).inc
      @agent.counter("#{queue_name}_#{message_type}_#{message_status}".to_sym).inc
    end

    # Records the same set of metrics as New Relic custom metrics.
    # NOTE(review): the last three increment_metric names lack the
    # NEW_RELIC_CUSTOM_METRICS_PATH prefix, unlike every other metric here —
    # possibly intentional (existing dashboards), so left unchanged.
    def self.new_relic_metrics(elapsed_time, message_status, message_type, queue_name, queue_wait_time)
      ::NewRelic::Agent.record_metric("#{NEW_RELIC_CUSTOM_METRICS_PATH}all", elapsed_time)
      ::NewRelic::Agent.record_metric("#{NEW_RELIC_CUSTOM_METRICS_PATH}#{queue_name}", elapsed_time)
      ::NewRelic::Agent.record_metric("#{NEW_RELIC_CUSTOM_METRICS_PATH}#{queue_name}_#{message_type}", elapsed_time)
      ::NewRelic::Agent.record_metric("#{NEW_RELIC_CUSTOM_METRICS_PATH}#{queue_name}_#{message_type}_wait_time", queue_wait_time)

      ::NewRelic::Agent.increment_metric("#{NEW_RELIC_CUSTOM_METRICS_PATH}count")
      ::NewRelic::Agent.increment_metric("#{queue_name}_count")
      ::NewRelic::Agent.increment_metric("#{queue_name}_#{message_status}_count")
      ::NewRelic::Agent.increment_metric("#{queue_name}_#{message_type}_#{message_status}_count")
    end

    # Starts the local metrics agent and attaches the periodic log reporter.
    def self.start_metrics_agent
      # Fix: inside SqsPoller::Metrics the bare constant `Metrics` resolves to
      # this module (via the SqsPoller lexical scope), so `Metrics::Agent`
      # raised NameError; the ruby-metrics agent lives at top level.
      @agent = ::Metrics::Agent.new
      @agent.report_to("log_reporter", SqsPoller::Metrics::LogReporter.new)
      @agent.report_periodically
    end

  end

end
|
109
|
+
|
@@ -0,0 +1,141 @@
|
|
1
|
+
require "concurrent"
|
2
|
+
require "net/http"
|
3
|
+
require "aws-sdk"
|
4
|
+
require 'sqspoller/logger/logger'
|
5
|
+
require "sqspoller/poll/queue_poller"
|
6
|
+
require "sqspoller/metrics/queue_stats_reporter"
|
7
|
+
|
8
|
+
# QueueController Responsible for controlling ThreadPools per sqs queue.
|
9
|
+
# Each queue can configure max number of threads to fetch messages from SQS and queue it.
|
10
|
+
# For each queue there will be threadpool created and managed :workers_pool
|
11
|
+
# Each worker thread will create Aws::SQS::QueuePoller and poll messages from SQS.
|
12
|
+
module SqsPoller
  module Poller
    # Singleton controller owning one polling thread pool per configured SQS
    # queue. Each pool thread runs a QueuePoller that pushes messages onto
    # the shared task queue. Construct via QueueController.start.
    class QueueController

      private_class_method :new

      def initialize(queues_config, task_queue, aws_config)
        @queues_config = queues_config
        @aws_config = aws_config
        @task_queue = task_queue
        @logger = SqsPoller::Logger.get_new_logger(self.class.name)
        @counter = Concurrent::MutexAtomicFixnum.new(0)
        @sqs_client = Aws::SQS::Client.new(:access_key_id => @aws_config[:access_key_id], :secret_access_key => @aws_config[:secret_access_key], :region => @aws_config[:region])
        @started = true
        # hash of threadpools by queue name
        @pollers_thread_pool = {}
        # hash of QueuePollers by queue name
        @pollers = {}
        @queue_urls = {}
      end

      # Returns the started singleton.
      # @raise [RuntimeError] when start has never been called
      def self.get
        return @instance if @instance
        raise "QueueController not yet started"
      end

      def self.delete_messages(queue, messages)
        self.get.delete_messages(queue, messages)
      end

      def self.delete_message(queue, message)
        self.get.delete_message(queue, message)
      end

      # Batch-deletes processed messages from the named queue.
      def delete_messages(queue, messages)
        @sqs_client.delete_message_batch(
          queue_url: get_queue_url(queue),
          entries: messages.map { |msg|
            { id: msg.message_id, receipt_handle: msg.receipt_handle }
          }
        )
      end

      def get_queue_url(queue)
        @queue_urls[queue]
      end

      # Deletes a single processed message from the named queue.
      def delete_message(queue, message)
        delete_msg = {
          queue_url: get_queue_url(queue),
          receipt_handle: message.receipt_handle,
        }
        # Fix: was `@client.delete_message(...)`, but @client is never
        # assigned anywhere (NoMethodError on nil); the SQS client is held
        # in @sqs_client, matching delete_messages above.
        @sqs_client.delete_message(delete_msg)
      end

      # false when any queue's pollers failed to start.
      def started?
        @started
      end

      def pollers
        @pollers
      end

      # Creates (once) and starts the singleton controller plus its stats
      # reporter; subsequent calls return the existing instance.
      def self.start (queues_config, task_queue, aws_config)
        return @instance if @instance
        @instance = new(queues_config, task_queue, aws_config)
        @instance.start_queue_controller
        @instance.start_poller_stats_reporter
        @instance
      end

      def start_poller_stats_reporter
        SqsPoller::Metrics::SqsStatsReporter.new(
          {
            :queue_controller => self
          }
        )
      end

      # Resolves each configured queue's URL and spins up its pollers.
      # Queues configured with polling_threads == 0 are skipped.
      def start_queue_controller
        @queues_config.keys.each { |queue|
          queue_config = @queues_config[queue]
          polling_threads = queue_config[:polling_threads]
          if polling_threads == 0
            @logger.info "Polling disabled for queue: #{queue}"
            next
          end
          started = false
          begin
            sqs_queue_config = @sqs_client.get_queue_url(queue_name: queue)
            started = start_pollers(polling_threads, queue, sqs_queue_config.queue_url, queue_config)
            @queue_urls[queue] = sqs_queue_config.queue_url
          # Fix: was `rescue Exception`, which also swallows SignalException /
          # SystemExit; StandardError still covers all AWS SDK errors.
          rescue StandardError => e
            @logger.error "Failed to start Queue Pollers. Caught error: #{e.message}\n #{e.backtrace.join("\n")}"
          end
          unless started
            @started = false
            @logger.error "Failed to start Queue Pollers for the queue #{queue}"
          end
        }
      end

      private

      # Starts polling_threads QueuePoller workers for one queue on a
      # dedicated thread pool; returns true when all workers were posted.
      def start_pollers(polling_threads, queue, queue_url, queue_config)
        workers = Concurrent::RubyThreadPoolExecutor.new(max_threads: polling_threads, min_threads: 1, max_queue: polling_threads)
        @pollers_thread_pool[queue] = workers
        @pollers[queue] = []
        polling_threads.times do |index|
          worker = QueuePoller.new "QueuePoller-#{index}", queue, queue_config, @task_queue, @sqs_client, queue_url, @counter
          @pollers[queue] << worker
          workers.post do
            @logger.info "Starting QueuePoller-#{queue}-#{index} object for queue: #{queue}"
            loop do
              begin
                worker.run
              # Fix: was `rescue Exception` — restart the poller on any
              # StandardError but let fatal signals propagate.
              rescue StandardError => e
                @logger.error "Poller killed for queue: #{e} and restarted"
              end
            end
          end
          sleep 0.01
        end
        all_workers_started = workers.length == polling_threads
        all_workers_started
      end

    end
  end
end
|
@@ -0,0 +1,73 @@
|
|
1
|
+
require "concurrent"
|
2
|
+
require "net/http"
|
3
|
+
require "aws-sdk"
|
4
|
+
require 'sqspoller/common/utils'
|
5
|
+
require 'sqspoller/logger/logger'
|
6
|
+
|
7
|
+
# QueuePoller responsible for polling batch message[s] using Aws::SQS::QueuePoller based on configured :max_number_of_messages and push to the task queue.
|
8
|
+
# If there is no messages available SQS it will keep polling based on :wait_time_seconds until the message arrives.
|
9
|
+
module SqsPoller
  module Poller
    # Wraps Aws::SQS::QueuePoller: long-polls one queue and pushes each
    # received message (plus metadata) onto the shared task queue. Deletion is
    # skipped here — consumers delete messages after processing.
    class QueuePoller

      # SQS long-poll maximum wait (seconds) and maximum receive batch size.
      DEFAULT_SQS_WAIT_TIME_SECONDS = 20
      DEFAULT_SQS_MAX_NUMBER_OF_MESSAGES = 10

      def initialize(worker_name, queue_name, queue_config, task_queue, sqs_client, queue_url, counter)
        @worker_name = worker_name
        @queue_name = queue_name
        @queue_config = queue_config
        @task_queue = task_queue
        @wait_time_seconds = get_wait_time_seconds(@queue_config)
        @max_number_of_messages = get_max_number_of_messages(@queue_config)
        @sqs_client = sqs_client
        @queue_url = queue_url
        @logger = SqsPoller::Logger.get_new_logger("#{self.class.name}-#{@worker_name}")
        @counter = counter
      end

      # Latest Aws::SQS::QueuePoller stats (nil until the first request).
      def get_poller_stats
        @poller_stats
      end

      # Blocks polling the queue; pushes each message as a task hash
      # {:message, :queue_name, :queue_time, :index}.
      def run
        poller = Aws::SQS::QueuePoller.new(@queue_url, { client: @sqs_client })
        poller.before_request do |stats|
          @poller_stats = stats
        end
        poller.poll(skip_delete: true, :wait_time_seconds => @wait_time_seconds, :max_number_of_messages => @max_number_of_messages) do |messages|
          # With max_number_of_messages == 1 the SDK yields a single message,
          # not an array — normalize to an array.
          if @max_number_of_messages == 1
            messages = [messages]
          end
          messages.each { |message|
            task = {
              :message => message,
              :queue_name => @queue_name,
              :queue_time => SqsPoller::Common::Utils.get_current_time_in_millis,
              :index => @counter.increment
            }
            @task_queue.push task
          }
          @logger.info "Queued #{messages.size} messages from #{@queue_name}"
        end
      end

      # Returns the configured wait time when it lies in SQS's valid 0..20s
      # range, otherwise the 20s long-polling default.
      def get_wait_time_seconds(queue_config)
        wait_time_seconds = queue_config[:wait_time_seconds]
        unless wait_time_seconds && wait_time_seconds >= 0 && wait_time_seconds <= DEFAULT_SQS_WAIT_TIME_SECONDS
          wait_time_seconds = DEFAULT_SQS_WAIT_TIME_SECONDS
        end
        wait_time_seconds
      end

      # Returns the configured batch size when it lies in SQS's valid 1..10
      # range, otherwise the default of 10.
      # Fix: the original accepted 0 (`>= 0`), but ReceiveMessage requires
      # max_number_of_messages between 1 and 10 — 0 is rejected by the API.
      def get_max_number_of_messages(queue_config)
        max_number_of_messages = queue_config[:max_number_of_messages]
        unless max_number_of_messages && max_number_of_messages >= 1 && max_number_of_messages <= DEFAULT_SQS_MAX_NUMBER_OF_MESSAGES
          max_number_of_messages = DEFAULT_SQS_MAX_NUMBER_OF_MESSAGES
        end
        max_number_of_messages
      end

    end
  end
end
|