pgq 0.1 → 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    pgq (0.1)
+    pgq (0.1.1)
       activerecord (>= 2.3.2)
       activesupport (>= 2.3.2)
 
data/README.md CHANGED
@@ -74,7 +74,7 @@ class PgqMy < Pgq::Consumer
 end
 ```
 
-Insert event into queue like this:
+Insert event into queue:
 
     PgqMy.some_method1(1, 2, 3)
 
@@ -97,7 +97,7 @@ Start worker for queue:
 
 
 
-Also can consume manual, or write [bin_script](http://github.com/kostya/bin_script) like this:
+Also can consume manual, or write [bin_script](http://github.com/kostya/bin_script):
 ```ruby
 class PgqRunnerScript < BinScript
 
@@ -135,11 +135,11 @@ When any raise happens in consumer, its produce failed event, which can be retry
 
 Retry manual:
 
-    Pgq::Consumer.resend_failed_events(queue_name)
+    Pgq::Consumer.retry_failed_events(queue_name)
 
 Delete manual:
 
-    Pgq::Consumer.clear_failed_events(queue_name)
+    Pgq::Consumer.delete_failed_events(queue_name)
 
 
 ### Divide events between workers, for one consumer class
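The hunk above renames the two README-documented failed-event helpers. A minimal usage sketch mirroring the README's calling convention (the queue name `my_queue` is illustrative):

```ruby
# Re-enqueue failed events for this consumer's queue
# (renamed from resend_failed_events in 0.1).
Pgq::Consumer.retry_failed_events('my_queue')

# Drop failed events without retrying them
# (renamed from clear_failed_events in 0.1).
Pgq::Consumer.delete_failed_events('my_queue')
```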
@@ -1,6 +1,8 @@
 module Pgq::Api
   # should mixin to class, which have connection
 
+  # == manage queues
+
   def pgq_create_queue(queue_name)
     connection.select_value(sanitize_sql_array ["SELECT pgq.create_queue(?)", queue_name]).to_i
   end
@@ -16,16 +18,8 @@ module Pgq::Api
   def pgq_unregister_consumer(queue_name, consumer_id)
     connection.select_value(sanitize_sql_array ["SELECT pgq.unregister_consumer(?, ?)", queue_name, consumer_id]).to_i
   end
-
-  def pgq_add_queue(queue_name, consumer_name)
-    pgq_create_queue(queue_name.to_s)
-    pgq_register_consumer(queue_name.to_s, consumer_name.to_s)
-  end
-
-  def pgq_remove_queue(queue_name, consumer_name)
-    pgq_unregister_consumer(queue_name.to_s, consumer_name.to_s)
-    pgq_drop_queue(queue_name.to_s)
-  end
+
+  # == insert events
 
   def pgq_insert_event(queue_name, ev_type, ev_data, ev_extra1 = nil, ev_extra2 = nil, ev_extra3 = nil, ev_extra4 = nil)
     result = connection.select_value(sanitize_sql_array ["SELECT pgq.insert_event(?, ?, ?, ?, ?, ?, ?)",
@@ -33,6 +27,8 @@ module Pgq::Api
     result ? result.to_i : nil
   end
 
+  # == consuming
+
   def pgq_next_batch(queue_name, consumer_id)
     result = connection.select_value(sanitize_sql_array ["SELECT pgq.next_batch(?, ?)", queue_name, consumer_id])
     result ? result.to_i : nil
@@ -42,22 +38,22 @@ module Pgq::Api
     connection.select_all(sanitize_sql_array ["SELECT * FROM pgq.get_batch_events(?)", batch_id])
   end
 
+  def pgq_finish_batch(batch_id)
+    connection.select_value(sanitize_sql_array ["SELECT pgq.finish_batch(?)", batch_id])
+  end
+
+  # == failed/retry
+
   def pgq_event_failed(batch_id, event_id, reason)
     connection.select_value(sanitize_sql_array ["SELECT pgq.event_failed(?, ?, ?)", batch_id, event_id, reason]).to_i
   end
-
+
   def pgq_event_retry(batch_id, event_id, retry_seconds)
     connection.select_value(sanitize_sql_array ["SELECT pgq.event_retry(?, ?, ?)", batch_id, event_id, retry_seconds]).to_i
-  end
-
-  def pgq_finish_batch(batch_id)
-    connection.select_value(sanitize_sql_array ["SELECT pgq.finish_batch(?)", batch_id])
-  end
-
-  def pgq_get_queue_info(queue_name)
-    connection.select_value(sanitize_sql_array ["SELECT pgq.get_queue_info(?)", queue_name])
-  end
+  end
 
+  # == failed events
+
   def pgq_failed_event_retry(queue_name, consumer, event_id)
     connection.select_value(sanitize_sql_array ["SELECT * FROM pgq.failed_event_retry(?, ?, ?)", queue_name, consumer, event_id])
   end
@@ -66,7 +62,7 @@ module Pgq::Api
     connection.select_value(sanitize_sql_array ["SELECT * FROM pgq.failed_event_delete(?, ?, ?)", queue_name, consumer, event_id])
   end
 
-  def pgq_failed_events_count(queue_name, consumer)
+  def pgq_failed_event_count(queue_name, consumer)
     res = connection.select_value(sanitize_sql_array ["SELECT * FROM pgq.failed_event_count(?, ?)", queue_name, consumer])
     res ? res.to_i : nil
   end
@@ -76,13 +72,24 @@ module Pgq::Api
     connection.select_all(sanitize_sql_array ["SELECT * FROM pgq.failed_event_list(?, ?, ?, ?) order by ev_id #{order}", queue_name, consumer, limit.to_i, offset.to_i])
   end
 
-  # queue lag in seconds
-  def pgq_queue_lag(queue_name)
-    connection.select_value(sanitize_sql_array ["SELECT Max(EXTRACT(epoch FROM lag)) FROM pgq.get_consumer_info() where queue_name = ?", queue_name]).to_f
-  end
+  # == info methods
+
+  def pgq_get_queue_info(queue_name)
+    connection.select_value(sanitize_sql_array ["SELECT pgq.get_queue_info(?)", queue_name])
+  end
+
+  # Get list of queues.
+  # Result: (queue_name, queue_ntables, queue_cur_table, queue_rotation_period, queue_switch_time, queue_external_ticker, queue_ticker_max_count, queue_ticker_max_lag, queue_ticker_idle_period, ticker_lag)
+  def pgq_get_queues_info
+    connection.select_values("SELECT pgq.get_queue_info()")
+  end
 
   def pgq_get_consumer_info
-    connection.select_all("SELECT * FROM pgq.get_consumer_info()")
+    connection.select_all("SELECT *, EXTRACT(epoch FROM last_seen) AS last_seen_sec, EXTRACT(epoch FROM lag) AS lag_sec FROM pgq.get_consumer_info()")
   end
 
+  def pgq_get_consumer_queue_info(queue_name)
+    connection.select_one(sanitize_sql_array ["SELECT *, EXTRACT(epoch FROM last_seen) AS last_seen_sec, EXTRACT(epoch FROM lag) AS lag_sec FROM pgq.get_consumer_info(?)", queue_name]) || {}
+  end
+
 end
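Note that `pgq_queue_lag` is removed in this hunk; the reworked info methods expose lag as a `lag_sec` column instead. A hedged sketch of reading it, where `db` stands in for whatever connection-owning class mixes in `Pgq::Api` and the queue name is illustrative:

```ruby
# 'db' is assumed to mix in Pgq::Api and own an ActiveRecord connection.
info = db.pgq_get_consumer_queue_info('my_queue')  # new in 0.1.1; returns a hash (or {})
lag_seconds = info['lag_sec'].to_f                 # EXTRACT(epoch FROM lag) from the query above

# pgq_get_consumer_info now also returns last_seen_sec / lag_sec for every consumer row.
db.pgq_get_consumer_info.each do |row|
  puts "#{row['queue_name']}: lag #{row['lag_sec']}s"
end
```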
@@ -35,7 +35,7 @@ class Pgq::ConsumerBase
   end
 
   def self.set_queue_name(name)
-    self.instance_variable_set('@queue_name', name.to_s)
+    @queue_name = name.to_s
   end
 
   # magic set queue_name from class name
@@ -119,14 +119,12 @@ class Pgq::ConsumerBase
     perform(type, *data)
 
   rescue Exception => ex
-    message = event.exception_message(ex)
-    self.log_error(message)
-    event.failed!(message)
+    self.log_error(event.exception_message(ex))
+    event.failed!(ex)
 
   rescue => ex
-    message = event.exception_message(ex)
-    self.log_error(message)
-    event.failed!(message)
+    self.log_error(event.exception_message(ex))
+    event.failed!(ex)
   end
 
   def perform(type, *data)
@@ -149,16 +147,15 @@ class Pgq::ConsumerBase
     database.pgq_event_failed(@batch_id, event_id, reason)
   end
 
-  def event_retry(event_id)
-    database.pgq_event_retry(@batch_id, event_id, 0)
+  def event_retry(event_id, seconds = 0)
+    database.pgq_event_retry(@batch_id, event_id, seconds)
   end
 
   def all_events_failed(events, ex)
-    message = Pgq::Event.exception_message(ex)
-    log_error(message)
+    log_error(Pgq::Event.exception_message(ex))
 
     events.each do |event|
-      event.failed!(message)
+      event.failed!(ex)
     end
   end
 
@@ -8,23 +8,19 @@ class Pgq::Event
     @consumer = consumer
   end
 
-  def failed!(ex = 'Something happens')
-    if ex.is_a?(String)
-      @consumer.event_failed @id, ex
-    else # exception
-      @consumer.event_failed @id, exception_message(ex)
-    end
+  def failed!(ex)
+    h = {:class => ex.class.to_s, :message => ex.message, :backtrace => ex.backtrace}
+    @consumer.event_failed @id, consumer.coder.dump(h)
   end
 
-  def retry!
-    @consumer.event_retry(@id)
+  def retry!(seconds = 0)
+    @consumer.event_retry(@id, seconds)
   end
 
   def self.exception_message(e)
     <<-EXCEPTION
 Exception happend
-Type: #{e.class.inspect}
-Error occurs: #{e.message}
+Error occurs: #{e.class.inspect}(#{e.message})
 Backtrace: #{e.backtrace.join("\n") rescue ''}
     EXCEPTION
   end
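`failed!` now requires an exception (it dumps class, message and backtrace through the consumer's coder) and `retry!` accepts an optional delay in seconds. A small sketch, assuming `event` is a `Pgq::Event` handled inside a consumer; `do_work` and `TemporaryError` are hypothetical names:

```ruby
begin
  do_work(event)            # hypothetical application code
rescue TemporaryError => e  # hypothetical transient-error class
  event.retry!(60)          # ask PgQ to redeliver after ~60 seconds (was retry! with no argument)
rescue => e
  event.failed!(e)          # 0.1.1 expects the exception itself, not a message string
end
```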
@@ -33,9 +29,9 @@ Backtrace: #{e.backtrace.join("\n") rescue ''}
   def exception_message(e)
     <<-EXCEPTION
 Exception happend
-Type: #{type.inspect} #{e.class.inspect}
+Type: #{type.inspect}
 Data: #{data.inspect}
-Error occurs: #{e.message}
+Error occurs: #{e.class.inspect}(#{e.message})
 Backtrace: #{e.backtrace.join("\n") rescue ''}
     EXCEPTION
   end
@@ -2,34 +2,30 @@ module Pgq::Utils
 
   # == all queues for database
   def queues_list
-    database.pgq_get_consumer_info.map{|x| x['queue_name']}
+    database.pgq_get_consumer_info.map{|x| x['queue_name']}.uniq
   end
 
   # == methods for migrations
   def add_queue(queue_name, consumer_name = self.consumer_name)
-    database.pgq_add_queue(queue_name, consumer_name)
+    database.pgq_create_queue(queue_name.to_s)
+    database.pgq_register_consumer(queue_name.to_s, consumer_name.to_s)
   end
 
   def remove_queue(queue_name, consumer_name = self.consumer_name)
-    database.pgq_remove_queue(queue_name, consumer_name)
+    database.pgq_unregister_consumer(queue_name.to_s, consumer_name.to_s)
+    database.pgq_drop_queue(queue_name.to_s)
   end
 
   # == inspect queue
   # { type => events_count }
   def inspect_queue(queue_name)
-    ticks = database.pgq_get_queue_info(queue_name)
-    table = connection.select_value("SELECT queue_data_pfx as table FROM pgq.queue where queue_name = #{database.sanitize(queue_name)}")
+    table, last_event = last_event_id(queue_name)
 
-    result = {}
-
-    if ticks['current_batch']
-      sql = connection.select_value("SELECT * from pgq.batch_event_sql(#{database.sanitize(ticks['current_batch'].to_i)})")
-      last_event = connection.select_value("SELECT MAX(ev_id) AS count FROM (#{sql}) AS x")
-
+    if last_event
       stats = connection.select_all <<-SQL
         SELECT count(*) as count, ev_type
         FROM #{table}
-        WHERE ev_id > #{database.sanitize(last_event.to_i)}
+        WHERE ev_id > #{last_event.to_i}
         GROUP BY ev_type
       SQL
 
@@ -59,19 +55,13 @@ module Pgq::Utils
   # show hash stats, for londiste type of storage events
   # { type => events_count }
   def inspect_londiste_queue(queue_name)
-    ticks = database.pgq_get_consumer_info
-    table = connection.select_value(connection.sanitize_sql_array ["SELECT queue_data_pfx as table FROM pgq.queue where queue_name = ?", queue_name])
+    table, last_event = last_event_id(queue_name)
 
-    result = {}
-
-    if ticks['current_batch']
-      sql = connection.select_value("SELECT * from pgq.batch_event_sql(#{database.sanitize(ticks['current_batch'].to_i)})")
-      last_event = connection.select_value("SELECT MAX(ev_id) AS count FROM (#{sql}) AS x")
-
+    if last_event
      stats = connection.select_all <<-SQL
        SELECT count(*) as count, ev_type, ev_extra1
        FROM #{table}
-       WHERE ev_id > #{database.sanitize(last_event.to_i)}
+       WHERE ev_id > #{last_event.to_i}
        GROUP BY ev_type, ev_extra1
      SQL
 
@@ -97,14 +87,14 @@ module Pgq::Utils
 
   # == proxing method for tests
   def proxy(method_name)
-    self.should_receive(method_name) do |*data|
+    self.should_receive(:enqueue) do |method_name, *data|
       x = self.coder.load(self.coder.dump(data))
       self.new.send(:perform, method_name, *x)
     end.any_number_of_times
   end
 
   # == resend failed events in queue
-  def resend_failed_events(queue_name, limit = 5_000)
+  def retry_failed_events(queue_name, limit = 5_000)
     events = database.pgq_failed_event_list(queue_name, self.consumer_name, limit, nil, 'asc') || []
 
     events.each do |event|
@@ -114,7 +104,7 @@ module Pgq::Utils
     events.length
   end
 
-  def clear_failed_events(queue_name, limit = 5_000)
+  def delete_failed_events(queue_name, limit = 5_000)
     events = database.pgq_failed_event_list(queue_name, self.consumer_name, limit, nil, 'asc') || []
 
     events.each do |event|
@@ -124,4 +114,19 @@ module Pgq::Utils
     events.length
   end
 
+  def last_event_id(queue_name)
+    ticks = database.pgq_get_consumer_queue_info(queue_name)
+    table = connection.select_value("SELECT queue_data_pfx AS table FROM pgq.queue WHERE queue_name = #{database.sanitize(queue_name)}")
+
+    result = nil
+
+    if ticks['current_batch']
+      sql = connection.select_value("SELECT * FROM pgq.batch_event_sql(#{database.sanitize(ticks['current_batch'].to_i)})")
+      last_event = connection.select_value("SELECT MAX(ev_id) AS count FROM (#{sql}) AS x")
+      result = last_event.to_i
+    end
+
+    [table, result]
+  end
+
 end
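`add_queue`/`remove_queue` keep their public signatures but now call the low-level `pgq_create_queue`/`pgq_register_consumer` (and their counterparts) directly, as the migration specs further down also reflect. A migration-style sketch; the migration class and queue name are illustrative, and `PgqMy` is the consumer from the README:

```ruby
class CreatePgqMyQueue < ActiveRecord::Migration
  def self.up
    PgqMy.add_queue('my_queue')     # pgq.create_queue + pgq.register_consumer
  end

  def self.down
    PgqMy.remove_queue('my_queue')  # pgq.unregister_consumer + pgq.drop_queue
  end
end
```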
@@ -1,3 +1,3 @@
 module Pgq
-  VERSION = "0.1"
+  VERSION = "0.1.1"
 end
@@ -29,20 +29,20 @@ class Pgq::Worker
     @logger = h[:logger] || (defined?(Rails) && Rails.logger) || Logger.new(STDOUT)
     @consumers = []
 
-    queues = h[:queues]
-    raise "Queue not selected" if queues.blank?
+    @queues = h[:queues]
+    raise "Queue not selected" if @queues.blank?
 
-    if queues == ['all'] || queues == 'all'
+    if @queues == ['all'] || @queues == 'all'
       if defined?(Rails) && File.exists?(Rails.root + "config/queues_list.yml")
-        queues = YAML.load_file(Rails.root + "config/queues_list.yml")
+        @queues = YAML.load_file(Rails.root + "config/queues_list.yml")
       else
         raise "You shoud create config/queues_list.yml for all queues"
       end
     end
 
-    queues = queues.split(',') if queues.is_a?(String)
+    @queues = @queues.split(',') if @queues.is_a?(String)
 
-    queues.each do |queue|
+    @queues.each do |queue|
       klass = Pgq::Worker.predict_queue_class(queue)
       if klass
         @consumers << klass.new(@logger, queue)
@@ -72,7 +72,7 @@ class Pgq::Worker
   end
 
   def run
-    logger.info "Worker start"
+    logger.info "Worker for (#{@queues.join(",")}) started"
 
     loop do
       processed_count = process_batch
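The worker now stores the queue list in `@queues` and names the queues in its startup log line. A sketch of constructing and running it, assuming the options hash shown in the hunk (`:queues` may be an array, a comma-separated string, or `'all'` backed by `config/queues_list.yml`); the queue names are illustrative:

```ruby
require 'logger'

worker = Pgq::Worker.new(
  :queues => 'my_queue,other_queue',  # a String is split on ','
  :logger => Logger.new(STDOUT)
)
worker.run  # logs: Worker for (my_queue,other_queue) started
```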
@@ -105,8 +105,9 @@ describe Pgq::ConsumerBase do
   end
 
   it "all_events_failed" do
-    @event.should_receive(:failed!).with(an_instance_of(String))
-    @consumer.all_events_failed(@events, Exception.new('wow'))
+    ex = Exception.new('wow')
+    @event.should_receive(:failed!).with(ex)
+    @consumer.all_events_failed(@events, ex)
   end
 
   it "perform_events" do
@@ -120,8 +121,9 @@ describe Pgq::ConsumerBase do
   end
 
   it "perform_event raised" do
-    @consumer.should_receive(:perform).with('bla', *@data).and_throw(:wow)
-    @event.should_receive(:failed!).with(an_instance_of(String))
+    ex = Exception.new('wow')
+    @consumer.should_receive(:perform).with('bla', *@data).and_raise(ex)
+    @event.should_receive(:failed!).with(ex)
     @consumer.perform_event(@event)
   end
 
@@ -129,19 +131,21 @@ describe Pgq::ConsumerBase do
 
   describe "migration" do
     it "up" do
-      Pgq::ConsumerBase.database.should_receive(:pgq_add_queue).with('super', Pgq::ConsumerBase.consumer_name)
+      Pgq::ConsumerBase.database.should_receive(:pgq_create_queue).with('super')
+      Pgq::ConsumerBase.database.should_receive(:pgq_register_consumer).with('super', Pgq::ConsumerBase.consumer_name)
       Pgq::ConsumerBase.add_queue("super")
     end
 
     it "down" do
-      Pgq::ConsumerBase.database.should_receive(:pgq_remove_queue).with('super', Pgq::ConsumerBase.consumer_name)
+      Pgq::ConsumerBase.database.should_receive(:pgq_drop_queue).with('super')
+      Pgq::ConsumerBase.database.should_receive(:pgq_unregister_consumer).with('super', Pgq::ConsumerBase.consumer_name)
       Pgq::ConsumerBase.remove_queue("super")
     end
   end
 
   it "should proxy consumer" do
     PgqTata3.proxy(:ptest)
-    PgqTata3.ptest(111, 'abc').should == 10
+    PgqTata3.enqueue(:ptest, 111, 'abc').should == 10
     $a.should == 111
     $b.should == 'abc'
   end
@@ -21,11 +21,11 @@ describe Pgq::Event do
 
   it "should failed!" do
     @consumer.should_receive(:event_failed).with(123, an_instance_of(String))
-    @ev.failed!
+    @ev.failed!(Exception.new("haha"))
   end
 
   it "should retry!" do
-    @consumer.should_receive(:event_retry).with(123)
+    @consumer.should_receive(:event_retry).with(123, 0)
     @ev.retry!
   end
 
metadata CHANGED
@@ -1,12 +1,13 @@
 --- !ruby/object:Gem::Specification
 name: pgq
 version: !ruby/object:Gem::Version
-  hash: 9
+  hash: 25
   prerelease: false
   segments:
   - 0
   - 1
-  version: "0.1"
+  - 1
+  version: 0.1.1
 platform: ruby
 authors:
 - Makarchev Konstantin
@@ -14,7 +15,7 @@ autorequire: init
 bindir: bin
 cert_chain: []
 
-date: 2012-05-09 00:00:00 +04:00
+date: 2012-05-16 00:00:00 +04:00
 default_executable:
 dependencies:
 - !ruby/object:Gem::Dependency