pgq 0.1
- data/.gitignore +4 -0
- data/Gemfile +4 -0
- data/Gemfile.lock +44 -0
- data/LICENSE +20 -0
- data/README.md +207 -0
- data/Rakefile +9 -0
- data/init.rb +1 -0
- data/lib/generators/pgq/add_generator.rb +40 -0
- data/lib/generators/pgq/add_templates/migration.rb +9 -0
- data/lib/generators/pgq/add_templates/pgq_class.rb +10 -0
- data/lib/generators/pgq/add_templates/spec.rb +8 -0
- data/lib/generators/pgq/config_generator.rb +18 -0
- data/lib/generators/pgq/config_templates/pgq.rb +12 -0
- data/lib/pgq.rb +9 -0
- data/lib/pgq/api.rb +88 -0
- data/lib/pgq/consumer.rb +23 -0
- data/lib/pgq/consumer_base.rb +175 -0
- data/lib/pgq/consumer_group.rb +27 -0
- data/lib/pgq/event.rb +43 -0
- data/lib/pgq/marshal64_coder.rb +13 -0
- data/lib/pgq/railtie.rb +9 -0
- data/lib/pgq/utils.rb +127 -0
- data/lib/pgq/version.rb +3 -0
- data/lib/pgq/worker.rb +83 -0
- data/lib/tasks/pgq.rake +64 -0
- data/pgq.gemspec +28 -0
- data/spec/consumer_base_spec.rb +149 -0
- data/spec/consumer_group_spec.rb +37 -0
- data/spec/consumer_spec.rb +53 -0
- data/spec/event_spec.rb +32 -0
- data/spec/marshal64_coder_spec.rb +22 -0
- data/spec/spec_helper.rb +11 -0
- data/spec/worker_spec.rb +53 -0
- metadata +157 -0
data/lib/pgq/consumer.rb
ADDED
@@ -0,0 +1,23 @@
+require 'pgq/consumer_base'
+
+# Cute class for magic inserts and light consuming
+
+class Pgq::Consumer < Pgq::ConsumerBase
+
+  # == magic insert of events
+
+  def self.method_missing(method_name, *args)
+    enqueue(method_name, *args)
+  end
+
+  def self.add_event(method_name, *args)
+    enqueue(method_name, *args)
+  end
+
+  # == magic consume
+
+  def perform(method_name, *args)
+    self.send(method_name, *args)
+  end
+
+end
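A hedged usage sketch of the class above (the PgqMailer class, its queue, and the send_welcome event are hypothetical; the underlying pgq queue must already exist in the database):

# Hypothetical consumer: ConsumerBase derives the queue name from the
# class name, so "PgqMailer" maps to the queue "mailer".
class PgqMailer < Pgq::Consumer
  def send_welcome(user_id)
    # deliver the mail here
  end
end

# Producer side: any unknown class method is caught by method_missing
# and stored as a pgq event via enqueue.
PgqMailer.send_welcome(42)
PgqMailer.add_event(:send_welcome, 42)   # equivalent explicit form

# Consumer side (normally driven by Pgq::Worker): perform dispatches
# the event type back to the instance method with the decoded args.
PgqMailer.new.perform_batch   # the logger argument is optional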
data/lib/pgq/consumer_base.rb
ADDED
@@ -0,0 +1,175 @@
+require 'pgq/utils'
+require 'pgq/api'
+require 'active_support/inflector' unless ''.respond_to?(:underscore)
+
+class Pgq::ConsumerBase
+  extend Pgq::Utils
+
+  @queue_name = 'default'
+  @consumer_name = 'default'
+
+  attr_accessor :logger, :queue_name, :consumer_name
+
+  # == connection
+
+  def self.database
+    ActiveRecord::Base # can be redefined
+  end
+
+  def database
+    self.class.database
+  end
+
+  def self.connection
+    database.connection
+  end
+
+  def connection
+    self.class.connection
+  end
+
+  # == queue name
+
+  def self.extract_queue_name
+    self.name.to_s.gsub(/^pgq/i, '').underscore.gsub('/', '-')
+  end
+
+  def self.set_queue_name(name)
+    self.instance_variable_set('@queue_name', name.to_s)
+  end
+
+  # magically set queue_name from the class name
+  def self.inherited(subclass)
+    subclass.set_queue_name(subclass.extract_queue_name)
+    subclass.instance_variable_set('@consumer_name', self.consumer_name)
+  end
+
+  def self.consumer_name
+    @consumer_name
+  end
+
+  def self.queue_name
+    @queue_name
+  end
+
+  # this method is used when inserting an event; possible to redefine
+  def self.next_queue_name
+    self.queue_name
+  end
+
+  # == coder
+
+  def self.coder
+    Pgq::Marshal64Coder
+  end
+
+  def coder
+    self.class.coder
+  end
+
+  # == insert event
+
+  def self.enqueue(method_name, *args)
+    self.database.pgq_insert_event(self.next_queue_name, method_name.to_s, coder.dump(args))
+  end
+
+  # == consumer part
+
+  def initialize(logger = nil, custom_queue_name = nil, custom_consumer_name = nil)
+    self.queue_name = custom_queue_name || self.class.queue_name
+    self.consumer_name = custom_consumer_name || self.class.consumer_name
+    self.logger = logger
+    @batch_id = nil
+  end
+
+  def perform_batch
+    events = []
+    pgq_events = get_batch_events
+
+    return 0 if pgq_events.blank?
+
+    events = pgq_events.map { |ev| Pgq::Event.new(self, ev) }
+    size = events.size
+    log_info "=> batch(#{queue_name}): events #{size}"
+
+    perform_events(events)
+
+  rescue Exception => ex
+    all_events_failed(events, ex)
+
+  rescue => ex
+    all_events_failed(events, ex)
+
+  ensure
+    finish_batch(events.size)
+
+    return events.size
+  end
+
+  def perform_events(events)
+    events.each do |event|
+      perform_event(event)
+    end
+  end
+
+  def perform_event(event)
+    type = event.type
+    data = event.data
+
+    perform(type, *data)
+
+  rescue Exception => ex
+    message = event.exception_message(ex)
+    self.log_error(message)
+    event.failed!(message)
+
+  rescue => ex
+    message = event.exception_message(ex)
+    self.log_error(message)
+    event.failed!(message)
+  end
+
+  def perform(type, *data)
+    raise "realize me"
+  end
+
+  def get_batch_events
+    @batch_id = database.pgq_next_batch(queue_name, consumer_name)
+    return nil if !@batch_id
+    database.pgq_get_batch_events(@batch_id)
+  end
+
+  def finish_batch(count = nil)
+    return unless @batch_id
+    database.pgq_finish_batch(@batch_id)
+    @batch_id = nil
+  end
+
+  def event_failed(event_id, reason)
+    database.pgq_event_failed(@batch_id, event_id, reason)
+  end
+
+  def event_retry(event_id)
+    database.pgq_event_retry(@batch_id, event_id, 0)
+  end
+
+  def all_events_failed(events, ex)
+    message = Pgq::Event.exception_message(ex)
+    log_error(message)
+
+    events.each do |event|
+      event.failed!(message)
+    end
+  end
+
+  # == log methods
+
+  def log_info(mes)
+    @logger.info(mes) if @logger
+  end
+
+  def log_error(mes)
+    @logger.error(mes) if @logger
+  end
+
+end
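A minimal direct subclass of the base class, as a hedged sketch (the PgqBilling class, its events, and the queue are hypothetical; enqueue stores the method name plus coder-encoded args, and perform receives them back decoded):

class PgqBilling < Pgq::ConsumerBase
  set_queue_name 'billing'   # same value extract_queue_name would derive; shown explicitly

  def perform(type, *args)
    case type.to_s
    when 'charge' then charge(*args)
    else raise "unknown event type: #{type}"
    end
  end

  def charge(account_id, cents)
    # apply the charge here
  end
end

PgqBilling.enqueue(:charge, 1, 999)   # producer: inserts one pgq event
PgqBilling.new.perform_batch          # consumer: fetches, performs and finishes one batch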
data/lib/pgq/consumer_group.rb
ADDED
@@ -0,0 +1,27 @@
+# for consuming a full batch (useful when events need to be grouped for batch processing)
+# a group is usually ~500 events
+
+require 'pgq/consumer'
+
+class Pgq::ConsumerGroup < Pgq::Consumer
+
+  # {'type' => [events]}
+  def perform_group(events_hash)
+    raise "realize me"
+  end
+
+  def perform_events(events)
+    events = sum_events(events)
+    # log_info "consume events (#{self.queue_name}): #{events.map{|k,v| [k, v.size]}.inspect}"
+    perform_group(events) if events.present?
+  end
+
+  def sum_events(events)
+    events.inject({}) do |result, event|
+      result[event.type] ||= []
+      result[event.type] << event
+      result
+    end
+  end
+
+end
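A hedged sketch of a grouped consumer (the PgqStats class and the page_view event type are hypothetical); perform_events folds the whole batch into a type => events hash and calls perform_group once per batch:

class PgqStats < Pgq::ConsumerGroup
  # events_hash looks like {'page_view' => [Pgq::Event, ...], ...}
  def perform_group(events_hash)
    (events_hash['page_view'] || []).each_slice(100) do |chunk|
      # bulk-insert chunk.map(&:data) here
    end
  end
end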
data/lib/pgq/event.rb
ADDED
@@ -0,0 +1,43 @@
+class Pgq::Event
+  attr_reader :type, :data, :id, :consumer
+
+  def initialize(consumer, event)
+    @id = event['ev_id']
+    @type = event['ev_type']
+    @data = consumer.coder.load(event['ev_data']) if event['ev_data']
+    @consumer = consumer
+  end
+
+  def failed!(ex = 'Something happened')
+    if ex.is_a?(String)
+      @consumer.event_failed @id, ex
+    else # exception
+      @consumer.event_failed @id, exception_message(ex)
+    end
+  end
+
+  def retry!
+    @consumer.event_retry(@id)
+  end
+
+  def self.exception_message(e)
+    <<-EXCEPTION
+      Exception happened
+      Type: #{e.class.inspect}
+      Error: #{e.message}
+      Backtrace: #{e.backtrace.join("\n") rescue ''}
+    EXCEPTION
+  end
+
+  # Prepare a string with exception details
+  def exception_message(e)
+    <<-EXCEPTION
+      Exception happened
+      Type: #{type.inspect} #{e.class.inspect}
+      Data: #{data.inspect}
+      Error: #{e.message}
+      Backtrace: #{e.backtrace.join("\n") rescue ''}
+    EXCEPTION
+  end
+
+end
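For reference, a hedged sketch of how a raw pgq row maps onto this wrapper (the field values and the consumer variable are illustrative; Marshal64Coder is the default coder from consumer_base.rb):

row = {
  'ev_id'   => 101,
  'ev_type' => 'send_welcome',                  # the method name used at enqueue time
  'ev_data' => Pgq::Marshal64Coder.dump([42])   # the encoded argument list
}

event = Pgq::Event.new(consumer, row)   # consumer is a Pgq::ConsumerBase instance
event.type   # => "send_welcome"
event.data   # => [42]

event.failed!('manual reject')   # mark the event failed within the current batch
event.retry!                     # or schedule it for a retry instead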
data/lib/pgq/railtie.rb
ADDED
data/lib/pgq/utils.rb
ADDED
@@ -0,0 +1,127 @@
+module Pgq::Utils
+
+  # == all queues for the database
+  def queues_list
+    database.pgq_get_consumer_info.map { |x| x['queue_name'] }
+  end
+
+  # == methods for migrations
+  def add_queue(queue_name, consumer_name = self.consumer_name)
+    database.pgq_add_queue(queue_name, consumer_name)
+  end
+
+  def remove_queue(queue_name, consumer_name = self.consumer_name)
+    database.pgq_remove_queue(queue_name, consumer_name)
+  end
+
+  # == inspect queue
+  # { type => events_count }
+  def inspect_queue(queue_name)
+    ticks = database.pgq_get_queue_info(queue_name)
+    table = connection.select_value("SELECT queue_data_pfx as table FROM pgq.queue where queue_name = #{database.sanitize(queue_name)}")
+
+    result = {}
+
+    if ticks['current_batch']
+      sql = connection.select_value("SELECT * from pgq.batch_event_sql(#{database.sanitize(ticks['current_batch'].to_i)})")
+      last_event = connection.select_value("SELECT MAX(ev_id) AS count FROM (#{sql}) AS x")
+
+      stats = connection.select_all <<-SQL
+        SELECT count(*) as count, ev_type
+        FROM #{table}
+        WHERE ev_id > #{database.sanitize(last_event.to_i)}
+        GROUP BY ev_type
+      SQL
+
+      stats.each do |x|
+        result["#{x['ev_type']}"] = x['count'].to_i
+      end
+
+    else
+      stats = connection.select_all <<-SQL
+        SELECT ev_type
+        FROM #{table}
+        GROUP BY ev_type
+      SQL
+
+      stats.each do |x|
+        result["#{x['ev_type']}"] = 0
+      end
+    end
+
+    result
+  end
+
+  def inspect_self_queue
+    self.inspect_queue(self.queue_name)
+  end
+
+  # show hash stats for londiste-style event storage
+  # { type => events_count }
+  def inspect_londiste_queue(queue_name)
+    ticks = database.pgq_get_consumer_info
+    table = connection.select_value(connection.sanitize_sql_array ["SELECT queue_data_pfx as table FROM pgq.queue where queue_name = ?", queue_name])
+
+    result = {}
+
+    if ticks['current_batch']
+      sql = connection.select_value("SELECT * from pgq.batch_event_sql(#{database.sanitize(ticks['current_batch'].to_i)})")
+      last_event = connection.select_value("SELECT MAX(ev_id) AS count FROM (#{sql}) AS x")
+
+      stats = connection.select_all <<-SQL
+        SELECT count(*) as count, ev_type, ev_extra1
+        FROM #{table}
+        WHERE ev_id > #{database.sanitize(last_event.to_i)}
+        GROUP BY ev_type, ev_extra1
+      SQL
+
+      stats.each do |x|
+        result["#{x['ev_extra1']}:#{x['ev_type']}"] = x['count'].to_i
+      end
+
+    else
+      stats = connection.select_all <<-SQL
+        SELECT ev_type, ev_extra1
+        FROM #{table}
+        GROUP BY ev_type, ev_extra1 ORDER BY ev_extra1, ev_type
+      SQL
+
+      stats.each do |x|
+        result["#{x['ev_extra1']}:#{x['ev_type']}"] = 0
+      end
+    end
+
+    result
+  end
+
+
+  # == proxying method for tests
+  def proxy(method_name)
+    self.should_receive(method_name) do |*data|
+      x = self.coder.load(self.coder.dump(data))
+      self.new.send(:perform, method_name, *x)
+    end.any_number_of_times
+  end
+
+  # == resend failed events in the queue
+  def resend_failed_events(queue_name, limit = 5_000)
+    events = database.pgq_failed_event_list(queue_name, self.consumer_name, limit, nil, 'asc') || []
+
+    events.each do |event|
+      database.pgq_failed_event_retry(queue_name, self.consumer_name, event['ev_id'])
+    end
+
+    events.length
+  end
+
+  def clear_failed_events(queue_name, limit = 5_000)
+    events = database.pgq_failed_event_list(queue_name, self.consumer_name, limit, nil, 'asc') || []
+
+    events.each do |event|
+      database.pgq_failed_event_delete(queue_name, self.consumer_name, event['ev_id'])
+    end
+
+    events.length
+  end
+
+end
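These helpers are extended into every consumer class, so they are called on the class itself. A hedged sketch of typical calls (the PgqMailer class and the migration are hypothetical and assume the old-style ActiveRecord::Migration base class):

# In a migration: create or drop the queue and register the consumer.
class CreateMailerQueue < ActiveRecord::Migration
  def up
    PgqMailer.add_queue(PgqMailer.queue_name)
  end

  def down
    PgqMailer.remove_queue(PgqMailer.queue_name)
  end
end

# From a console or rake task: introspection and failed-event handling.
PgqMailer.inspect_self_queue                           # => {"send_welcome" => 12, ...}
PgqMailer.resend_failed_events(PgqMailer.queue_name)   # returns the number of retried events
PgqMailer.clear_failed_events(PgqMailer.queue_name)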
data/lib/pgq/version.rb
ADDED
data/lib/pgq/worker.rb
ADDED
@@ -0,0 +1,83 @@
+require 'logger'
+
+class Pgq::Worker
+  attr_reader :logger, :queues, :consumers, :sleep_time, :watch_file
+
+  def self.predict_queue_class(queue)
+    klass = nil
+    unless klass
+      queue.to_s.match(/([a-z_]+)/i)
+      klass_s = $1.to_s
+      klass_s.chop! if klass_s.size > 0 && klass_s[-1].chr == '_'
+      klass_s = "pgq_" + klass_s unless klass_s.start_with?("pgq_")
+      klass = klass_s.camelize.constantize rescue nil
+      klass = nil unless klass.is_a?(Class)
+    end
+    klass
+  end
+
+  def self.connection(queue)
+    klass = predict_queue_class(queue)
+    if klass
+      klass.connection
+    else
+      raise "can't find klass for queue #{queue}"
+    end
+  end
+
+  def initialize(h)
+    @logger = h[:logger] || (defined?(Rails) && Rails.logger) || Logger.new(STDOUT)
+    @consumers = []
+
+    queues = h[:queues]
+    raise "Queue not selected" if queues.blank?
+
+    if queues == ['all'] || queues == 'all'
+      if defined?(Rails) && File.exists?(Rails.root + "config/queues_list.yml")
+        queues = YAML.load_file(Rails.root + "config/queues_list.yml")
+      else
+        raise "You should create config/queues_list.yml to use all queues"
+      end
+    end
+
+    queues = queues.split(',') if queues.is_a?(String)
+
+    queues.each do |queue|
+      klass = Pgq::Worker.predict_queue_class(queue)
+      if klass
+        @consumers << klass.new(@logger, queue)
+      else
+        raise "Unknown queue: #{queue}"
+      end
+    end
+
+    @watch_file = h[:watch_file]
+    @sleep_time = h[:sleep_time] || 0.5
+  end
+
+  def process_batch
+    processed_count = 0
+
+    @consumers.each do |consumer|
+      processed_count += consumer.perform_batch
+
+      if @watch_file && File.exists?(@watch_file)
+        logger.info "Found file #{@watch_file}, exiting!"
+        File.unlink(@watch_file)
+        return processed_count
+      end
+    end
+
+    processed_count
+  end
+
+  def run
+    logger.info "Worker start"
+
+    loop do
+      processed_count = process_batch
+      sleep(@sleep_time) if processed_count == 0
+    end
+  end
+
+end
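A hedged sketch of wiring the worker up (queue names are hypothetical; predict_queue_class camelizes each queue name back to a Pgq* consumer class, and touching the watch file interrupts the current pass):

# Resolves "mailer" -> PgqMailer and "stats" -> PgqStats, then loops
# over perform_batch for every consumer.
worker = Pgq::Worker.new(
  :queues     => %w[mailer stats],
  :logger     => Logger.new(STDOUT),
  :sleep_time => 1,                        # pause when a pass processed nothing
  :watch_file => '/tmp/pgq_worker.stop'    # presence of this file stops the current pass
)
worker.run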