postqueue 0.1.0 → 0.2.1
- checksums.yaml +4 -4
- data/README.md +40 -33
- data/lib/postqueue/base/callback.rb +23 -0
- data/lib/postqueue/base/enqueue.rb +24 -12
- data/lib/postqueue/base/processing.rb +18 -88
- data/lib/postqueue/base/select_and_lock.rb +47 -0
- data/lib/postqueue/base.rb +19 -10
- data/lib/postqueue/item.rb +7 -2
- data/lib/postqueue/version.rb +1 -1
- data/lib/postqueue.rb +3 -3
- data/spec/postqueue/enqueue_spec.rb +49 -14
- data/spec/postqueue/postqueue_spec.rb +1 -1
- data/spec/postqueue/process_errors_spec.rb +9 -6
- data/spec/postqueue/process_spec.rb +80 -0
- data/spec/spec_helper.rb +12 -14
- data/spec/support/configure_active_record.rb +8 -9
- metadata +19 -6
- data/spec/postqueue/idempotent_queue_spec.rb +0 -89
- data/spec/postqueue/process_one_spec.rb +0 -40
- data/spec/support/models.rb +0 -123
- data/spec/support/schema.rb +0 -88
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 7b6f8a8ad0f1aef1b565a223153f6280a9361314
+  data.tar.gz: 4dbf3727e217a8c248ce37fc78715aaf9080f0ea
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 569162000045134eb3107231ed94c6b52e15f5aed0e47ec2d681502037d142fe0cbd7ac9e85b2361e5fbeced0a0bd01fce6bee8cea4ef258cf0b2b19c46afb4e
+  data.tar.gz: 6c50d7e414a6b411e4066f6d2351d4d68310f8df847299911db0c7fbba2b25f82550a2db9f3ed8b0612c7777e4a415da2fbdcba9e96bcf92badb0bc602c24e60
data/README.md
CHANGED
@@ -13,56 +13,63 @@ Why not using another queue implementation? postqueue comes with some extras:
 
 ```ruby
 queue = PostgresQL::Base.new
-queue.enqueue op: "
-queue.process do |op,
+queue.enqueue op: "product/reindex", entity_id: [12,13,14,15]
+queue.process do |op, entity_ids|
   # note: entity_ids is always an Array of ids.
-  case
-  when "reindex
+  case op
+  when "product/reindex"
     Product.index_many(Product.where(id: entity_ids))
   else
-    raise "Unsupported op
+    raise "Unsupported op: #{op}"
   end
 end
 ```
 
-The
-
-queue entries
+The process call will select a number of queue items for processing. They will all have
+the same `op` attribute. The callback will receive the `op` attribute and the `entity_ids`
+of all queue entries selected for processing. The `processing` method will return the
+return value of the block.
 
-
-
-
-been sent to the block. This is highly unrecommended though, since when using a block to do processing errors
-and exceptions can properly be dealt with.
+If no callback is given the return value will be the `[op, entity_ids]` values
+that would have been sent to the block. This is highly unrecommended though, since
+when using a block to do processing errors and exceptions can properly be dealt with.
 
 Postqueue.process also accepts the following arguments:
 
-- `entity_type`: only process entries with this `entity_type`;
 - `op`: only process entries with this `op` value;
-- `batch_size`:
+- `batch_size`: maximum number of items to process in one go.
 
 Example:
 
-    Postqueue.process(
-      # only handle
+    Postqueue.process(op: 'product/reindex', batch_size: 10) do |op, entity_ids|
+      # only handle up to 10 "product/reindex" entries
     end
 
-If the
+If the block fails, by either returning `false` or by raising an exception the queue will
+postpone processing these entries by an increasing amount of time, up until
+`Postqueue::MAX_ATTEMPTS` failed attempts. The current MAX_ATTEMPTS definition
+leads to a maximum postpone interval (currently up to 190 seconds).
+
+If the queue is empty or no matching queue entry could be found, `Postqueue.process`
+returns nil.
 
 ### process a single entry
 
-
+Postqueue implements a shortcut to process only a single entry. Under the hood this
+calls `Postqueue.process` with `batch_size` set to `1`:
 
-    Postqueue.process_one do |op,
+    Postqueue.process_one do |op, entity_ids|
     end
 
-Note that even though `process_one` will only ever process a single entry the
+Note that even though `process_one` will only ever process a single entry the
+`entity_ids` parameter to the block is still an array (holding a single ID
+in that case).
 
 ## idempotent operations
 
-
-
-
+Postqueue comes with simple support for idempotent operations: if an operation is deemed
+idempotent it is not enqueued again if it can be found in the queue already. Note that
+a queue item will be created if another item is currently being processed.
 
     class Testqueue < Postqueue::Base
      def idempotent?(entity_type:,op:)
@@ -72,22 +79,22 @@ ops as idempotent:
 
 ## batch processing
 
-Often queue items can be processed in batches for a better performance of the entire system.
-batch processing for some items subclass `Postqueue::Base` and reimplement the
-to return a suggested
-for all queue entries:
+Often queue items can be processed in batches for a better performance of the entire system.
+To allow batch processing for some items subclass `Postqueue::Base` and reimplement the
+`batch_size?` method to return a suggested batch size for a specific operation.
+The following implements a batch_size of 100 for all queue entries:
 
-    class
-      def batch_size(
+    class Batchqueue < Postqueue::Base
+      def batch_size(op:)
        100
      end
    end
 
 ## Searchable via SQL
 
-In contrast to other queue implementations available for Rubyists this queue formats
-makes it possible to query the queue via SQL. On the other
-enqueue arbitrary entries as these others do.
+In contrast to other queue implementations available for Rubyists this queue formats
+entries in a way that makes it possible to query the queue via SQL. On the other
+hand this queue also does not allow to enqueue arbitrary entries as these others do.
 
 ## Installation
 
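A minimal worker-loop sketch using only the API documented in the README above; the queue object, the `sleep` interval, and the op string are illustrative, not part of the gem:

```ruby
require "postqueue"

queue = Postqueue::Base.new

loop do
  # process up to a batch of matching items; returns nil when nothing is due
  result = queue.process(op: "product/reindex") do |_op, entity_ids|
    Product.index_many(Product.where(id: entity_ids))
  end

  sleep 1 if result.nil?
end
```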
data/lib/postqueue/base/callback.rb
ADDED
@@ -0,0 +1,23 @@
+module Postqueue
+  class Base
+    def run_callback(op:, entity_ids:, &_block)
+      queue_times = item_class.find_by_sql <<-SQL
+        SELECT extract('epoch' from AVG(now() - created_at)) AS avg,
+               extract('epoch' from MAX(now() - created_at)) AS max
+        FROM #{item_class.table_name} WHERE entity_id IN (#{entity_ids.join(',')})
+      SQL
+      queue_time = queue_times.first
+
+      # run callback.
+      result = [ op, entity_ids ]
+
+      total_processing_time = Benchmark.realtime do
+        result = yield(*result) if block_given?
+      end
+
+      timing = Timing.new(queue_time.avg, queue_time.max, total_processing_time, total_processing_time / entity_ids.length)
+
+      [ result, timing ]
+    end
+  end
+end
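`run_callback` returns a `[result, timing]` pair, where `Timing` is the Struct introduced in `lib/postqueue/base.rb` further down in this diff. A hypothetical illustration of reading its fields (the numbers are made up):

```ruby
require "postqueue"

# Illustrative values only; Timing is
# Struct.new(:avg_queue_time, :max_queue_time, :total_processing_time, :processing_time).
timing = Postqueue::Timing.new(0.8, 1.4, 0.120, 0.060)

puts "queue time avg/max: #{timing.avg_queue_time}s/#{timing.max_queue_time}s, " \
     "processing: #{timing.total_processing_time}s total, #{timing.processing_time}s per item"
```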
data/lib/postqueue/base/enqueue.rb
CHANGED
@@ -1,19 +1,31 @@
 module Postqueue
   class Base
-
-
-
+    # Enqueues an queue item. If the operation is duplicate, and an entry with
+    # the same combination of op and entity_id exists already, no new entry will
+    # be added to the queue.
+    #
+    # [TODO] An optimized code path, talking directly to PG, might be faster by a factor of 4 or so.
+    def enqueue(op:, entity_id:, duplicate: true)
+      if entity_id.is_a?(Array)
+        enqueue_many(op: op, entity_ids: entity_id, duplicate: duplicate)
+        return
+      end
 
-
-
-
-      # binds << ActiveRecord::Attribute.from_user("name", op, ::ActiveRecord::Type::String.new)
-      # binds << ActiveRecord::Attribute.from_user("entity_type", entity_type, ::ActiveRecord::Type::String.new)
-      # binds << ActiveRecord::Attribute.from_user("entity_id", entity_id, ::ActiveRecord::Type::Integer.new)
-      # # Note: Rails 4 does not understand prepare: true
-      # db.exec_query(sql, 'SQL', binds, prepare: true)
+      if !duplicate && item_class.where(op: op, entity_id: entity_id).present?
+        return
+      end
 
-      item_class.create!(op: op,
+      item_class.create!(op: op, entity_id: entity_id)
+    end
+
+    private
+
+    def enqueue_many(op:, entity_ids:, duplicate:) #:nodoc:
+      item_class.transaction do
+        entity_ids.each do |entity_id|
+          enqueue(op: op, entity_id: entity_id, duplicate: duplicate)
+        end
+      end
    end
  end
 end
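A short usage sketch of the `enqueue` signature shown above; the op string and ids are illustrative:

```ruby
require "postqueue"

queue = Postqueue::Base.new

# single id; duplicate entries are allowed by default
queue.enqueue op: "product/reindex", entity_id: 12

# an Array fans out to enqueue_many inside a single transaction;
# duplicate: false skips ids that are already queued for the same op
queue.enqueue op: "product/reindex", entity_id: [12, 13, 14], duplicate: false
```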
data/lib/postqueue/base/processing.rb
CHANGED
@@ -2,125 +2,55 @@ module Postqueue
   MAX_ATTEMPTS = 5
 
   class Base
-
     # Processes many entries
     #
     # process batch_size: 100
-    def process(
+    def process(op: nil, batch_size: 100, &block)
       status, result = item_class.transaction do
-        process_inside_transaction(
+        process_inside_transaction(op: op, batch_size: batch_size, &block)
       end
 
       raise result if status == :err
       result
     end
 
-    def process_one(
-      process(
-    end
-
-    def idempotent?(entity_type:, op:)
-      false
-    end
-
-    def batch_size(entity_type:, op:)
-      10
+    def process_one(op: nil, &block)
+      process(op: op, batch_size: 1, &block)
     end
 
     private
 
-    # Select and lock up to \a limit unlocked items in the queue.
-    def select_and_lock(relation, limit:)
-      relation = relation.where("failed_attempts < ? AND next_run_at < ?", MAX_ATTEMPTS, Time.now).order(:next_run_at, :id)
-
-      sql = relation.to_sql + " FOR UPDATE SKIP LOCKED"
-      sql += " LIMIT #{limit}" if limit
-      items = item_class.find_by_sql(sql)
-
-      items
-    end
-
-    def calculate_batch_size(op:, entity_type:, batch_size:)
-      processor_batch_size = self.batch_size(op: op, entity_type: entity_type)
-      if !processor_batch_size || processor_batch_size < 2
-        1
-      elsif(!batch_size)
-        processor_batch_size
-      else
-        [ processor_batch_size, batch_size ].min
-      end
-    end
-
     # The actual processing. Returns [ :ok, number-of-items ] or [ :err, exception ]
-    def process_inside_transaction(
-
-      relation = relation.where(entity_type: entity_type) if entity_type
-      relation = relation.where(op: op) if op
-
-      first_match = select_and_lock(relation, limit: 1).first
-      return [ :ok, nil ] unless first_match
-      op, entity_type = first_match.op, first_match.entity_type
+    def process_inside_transaction(op:, batch_size:, &block)
+      batch = select_and_lock_batch(op: op, batch_size: batch_size)
 
-
-
-      # number > 0, then the passed in batch_size provides an additional upper limit.
-      batch_size = calculate_batch_size(op: op, entity_type: entity_type, batch_size: batch_size)
-      if batch_size > 1
-        batch_relation = relation.where(entity_type: entity_type, op: op)
-        batch = select_and_lock(batch_relation, limit: batch_size)
-      else
-        batch = [ first_match ]
-      end
+      match = batch.first
+      return [ :ok, nil ] unless match
 
       entity_ids = batch.map(&:entity_id)
-
-      # If the current operation is idempotent we will mark additional queue items as
-      # in process.
-      if idempotent?(op: op, entity_type: entity_type)
-        entity_ids.uniq!
-        process_relations = relation.where(entity_type: entity_type, op: op, entity_id: entity_ids)
-        items_in_processing = select_and_lock(process_relations, limit: nil)
-      else
-        items_in_processing = batch
-      end
-
-      items_in_processing_ids = items_in_processing.map(&:id)
-
-      queue_times = item_class.find_by_sql <<-SQL
-        SELECT extract('epoch' from AVG(now() - created_at)) AS avg,
-               extract('epoch' from MAX(now() - created_at)) AS max
-        FROM #{item_class.table_name} WHERE entity_id IN (#{entity_ids.join(",")})
-      SQL
-      queue_time = queue_times.first
-
-      # run callback.
-      result = [ op, entity_type, entity_ids ]
-
-      processing_time = Benchmark.realtime do
-        result = yield *result if block_given?
-      end
+      result, timing = run_callback(op: match.op, entity_ids: entity_ids, &block)
 
       # Depending on the result either reprocess or delete all items
       if result == false
-        postpone
+        postpone batch.map(&:id)
       else
-        on_processing(op,
-        item_class.where(id:
+        on_processing(match.op, entity_ids, timing)
+        item_class.where(id: batch.map(&:id)).delete_all
       end
 
       [ :ok, result ]
     rescue => e
-      on_exception(e, op,
-      postpone
+      on_exception(e, match.op, entity_ids)
+      postpone batch.map(&:id)
       [ :err, e ]
     end
 
     def postpone(ids)
       item_class.connection.exec_query <<-SQL
-        UPDATE #{item_class.table_name}
-        SET failed_attempts = failed_attempts+1,
-            next_run_at = next_run_at + power(failed_attempts + 1, 1.5) * interval '10 second'
-        WHERE id IN (#{ids.join(
+        UPDATE #{item_class.table_name}
+        SET failed_attempts = failed_attempts+1,
+            next_run_at = next_run_at + power(failed_attempts + 1, 1.5) * interval '10 second'
+        WHERE id IN (#{ids.join(',')})
      SQL
    end
  end
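The `postpone` statement above adds `power(failed_attempts + 1, 1.5) * 10` seconds per failure, and items stop being selected once `failed_attempts` reaches `MAX_ATTEMPTS = 5`. A standalone sketch (not part of the gem) tabulating those delays:

```ruby
max_attempts = 5 # mirrors Postqueue::MAX_ATTEMPTS

# delay added after the n-th failure, in seconds: n**1.5 * 10
delays = (1..max_attempts).map { |n| (n**1.5 * 10).round(1) }
p delays # => [10.0, 28.3, 52.0, 80.0, 111.8]
```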
data/lib/postqueue/base/select_and_lock.rb
ADDED
@@ -0,0 +1,47 @@
+module Postqueue
+  class Base
+    # Select and lock up to \a limit unlocked items in the queue.
+    def select_and_lock(relation, limit:)
+      # Ordering by next_run_at and id should not strictly be necessary, but helps
+      # processing entries in the passed in order when enqueued at the same time.
+      relation = relation.where("failed_attempts < ? AND next_run_at < ?", MAX_ATTEMPTS, Time.now).order(:next_run_at, :id)
+
+      # FOR UPDATE SKIP LOCKED selects and locks entries, but skips those that
+      # are already locked - preventing this transaction from being locked.
+      sql = relation.to_sql + " FOR UPDATE SKIP LOCKED"
+      sql += " LIMIT #{limit}" if limit
+      item_class.find_by_sql(sql)
+    end
+
+    # returns a batch of queue items for processing. These queue items are choosen
+    # depending on the passed in op: and batch_size: settings (if any).
+    #
+    # All selected queue items will have the same op value. If an op: value is
+    # passed in, that one is chosen as a filter condition, otherwise the op value
+    # of the first queue entry is used insteatd.
+    #
+    # This method will at maximum select and lock batch_size items. If the batch_size
+    # returned by the #batch_size method is smaller than the passed in value here
+    # that one is used instead.
+    def select_and_lock_batch(op:, batch_size:, &_block)
+      relation = item_class.all
+      relation = relation.where(op: op) if op
+
+      match = select_and_lock(relation, limit: 1).first
+      return [] unless match
+
+      batch_size = calculate_batch_size(op: match.op, max_batch_size: batch_size)
+      return [ match ] if batch_size <= 1
+
+      batch_relation = relation.where(op: match.op)
+      select_and_lock(batch_relation, limit: batch_size)
+    end
+
+    def calculate_batch_size(op:, max_batch_size:)
+      recommended_batch_size = batch_size(op: op) || 1
+      return 1 if recommended_batch_size < 2
+      return recommended_batch_size unless max_batch_size
+      max_batch_size < recommended_batch_size ? max_batch_size : recommended_batch_size
+    end
+  end
+end
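For orientation only: with the default `postqueue` table, the locking query assembled by `select_and_lock` comes out roughly as sketched below; the exact quoting, timestamp literal, and column order are produced by ActiveRecord at runtime and will differ in detail.

```ruby
# Approximation of select_and_lock(item_class.all.where(op: "product/reindex"), limit: 10)
approximate_sql = <<~SQL
  SELECT "postqueue".* FROM "postqueue"
  WHERE "postqueue"."op" = 'product/reindex'
    AND (failed_attempts < 5 AND next_run_at < '...')
  ORDER BY "postqueue"."next_run_at" ASC, "postqueue"."id" ASC
  FOR UPDATE SKIP LOCKED LIMIT 10
SQL

puts approximate_sql
```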
data/lib/postqueue/base.rb
CHANGED
@@ -1,7 +1,14 @@
 module Postqueue
+  Timing = Struct.new(:avg_queue_time, :max_queue_time, :total_processing_time, :processing_time)
+
   class Base
     private
 
+    def batch_size(op:)
+      _ = op
+      1
+    end
+
     def item_class
       Postqueue::Item
     end
@@ -10,23 +17,25 @@ module Postqueue
       Postqueue.logger
     end
 
-    def on_processing(op,
-
+    def on_processing(op, entity_ids, timing)
+      msg = "processing '#{op}' for id(s) #{entity_ids.join(',')}: "
+      msg += "processing #{entity_ids.length} items took #{'%.3f msecs' % timing.total_processing_time}"
+
+      msg += ", queue_time: avg: #{'%.3f msecs' % timing.avg_queue_time}/max: #{'%.3f msecs' % timing.max_queue_time}"
+      logger.info msg
     end
 
-    def on_exception(exception, op,
-      logger.warn "processing '#{op}
+    def on_exception(exception, op, entity_ids)
+      logger.warn "processing '#{op}' for id(s) #{entity_ids.inspect}: caught #{exception}"
     end
   end
 
   def self.logger
     Logger.new(STDERR)
   end
-
-  def self.new
-    Base.new
-  end
 end
 
-require
-require
+require "postqueue/base/enqueue"
+require "postqueue/base/select_and_lock"
+require "postqueue/base/processing"
+require "postqueue/base/callback"
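A sketch of a queue subclass combining the hooks from `base.rb` above; the class name, batch size, and log format are illustrative:

```ruby
require "postqueue"

class ReindexQueue < Postqueue::Base
  # suggest larger batches than the default of 1 defined in base.rb
  def batch_size(op:)
    _ = op
    100
  end

  # replace the default logging hook shown above
  def on_processing(op, entity_ids, timing)
    logger.info "#{op}: #{entity_ids.length} items in #{'%.3f' % timing.total_processing_time}s"
  end
end
```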
data/lib/postqueue/item.rb
CHANGED
@@ -14,16 +14,21 @@ module Postqueue
   def self.migrate!(table_name = "postqueue")
     Item.connection.execute <<-SQL
       CREATE TABLE #{table_name} (
-        id SERIAL PRIMARY KEY,
+        id SERIAL PRIMARY KEY,
         op VARCHAR,
-        entity_type VARCHAR,
         entity_id INTEGER NOT NULL DEFAULT 0,
         created_at timestamp without time zone NOT NULL DEFAULT (now() at time zone 'utc'),
         next_run_at timestamp without time zone NOT NULL DEFAULT (now() at time zone 'utc'),
         failed_attempts INTEGER NOT NULL DEFAULT 0
       );
 
+      -- This index should be usable to find duplicate duplicates in the table. While
+      -- we search for entries with matching op and entity_id, we assume that entity_id
+      -- has a much higher cardinality.
       CREATE INDEX #{table_name}_idx1 ON #{table_name}(entity_id);
+
+      -- This index should help picking the next entries to run. Otherwise a full tablescan
+      -- would be necessary whenevr we check out items.
       CREATE INDEX #{table_name}_idx2 ON #{table_name}(next_run_at);
     SQL
   end
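A minimal setup sketch based on the `migrate!` helper above; the spec support file later in this diff drives the same schema through `Postqueue.migrate!`/`Postqueue.unmigrate!` wrappers:

```ruby
require "postqueue"

# creates the default "postqueue" table plus the two indexes shown above
Postqueue::Item.migrate!

# an alternative table name can be passed in
Postqueue::Item.migrate!("my_queue")
```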
data/lib/postqueue/version.rb
CHANGED
data/lib/postqueue.rb
CHANGED
data/spec/postqueue/enqueue_spec.rb
CHANGED
@@ -1,23 +1,58 @@
-require
+require "spec_helper"
 
-describe
-  let(:queue) { Postqueue.new }
+describe "enqueuing" do
+  let(:queue) { Postqueue::Base.new }
+  let(:item) { queue.items.first }
 
-
-
+  context "when enqueueing entries" do
+    before do
+      queue.enqueue op: "myop", entity_id: 12
+    end
+
+    it "enqueues items" do
+      expect(item.op).to eq("myop")
+      expect(item.entity_id).to eq(12)
+    end
+
+    it "sets defaults" do
+      expect(item.created_at).to be > (Time.now - 1.second)
+      expect(item.next_run_at).to be > (Time.now - 1.second)
+      expect(item.failed_attempts).to eq(0)
+    end
   end
 
-
+  context "when enqueueing identical duplicate entries" do
+    before do
+      queue.enqueue op: "duplicate", entity_id: 12, duplicate: duplicate
+      queue.enqueue op: "duplicate", entity_id: 13, duplicate: duplicate
+      queue.enqueue op: "duplicate", entity_id: 12, duplicate: duplicate
+      queue.enqueue op: "duplicate", entity_id: 12, duplicate: duplicate
+      queue.enqueue op: "duplicate", entity_id: 12, duplicate: duplicate
+      queue.enqueue op: "no-duplicate", entity_id: 13, duplicate: duplicate
+    end
+
+    context "when duplicates are permitted" do
+      let(:duplicate) { true }
+
+      it "does not skip duplicates" do
+        expect(items.map(&:entity_id)).to eq([12, 13, 12, 12, 12, 13])
+      end
+    end
+
+    context "when duplicates are not permitted" do
+      let(:duplicate) { false }
 
-
-
-
-
+      it "skips later duplicates" do
+        expect(items.map(&:entity_id)).to eq([12, 13, 13])
+      end
+    end
   end
 
-
-
-
-
+  context "when enqueueing many entries" do
+    it "adds all entries skipping duplicates" do
+      queue.enqueue op: "duplicate", entity_id: 12, duplicate: false
+      queue.enqueue op: "duplicate", entity_id: [13, 12, 12, 13, 14], duplicate: false
+      expect(items.map(&:entity_id)).to eq([12, 13, 14])
+    end
   end
 end
data/spec/postqueue/process_errors_spec.rb
CHANGED
@@ -1,17 +1,17 @@
-require
+require "spec_helper"
 
 describe "::queue.process_one" do
-  let(:queue) { Postqueue.new }
+  let(:queue) { Postqueue::Base.new }
 
   class E < RuntimeError; end
 
   before do
-    queue.enqueue op: "
+    queue.enqueue op: "mytype", entity_id: 12
   end
 
   context "block raises an exception" do
     before do
-      expect { queue.process_one
+      expect { queue.process_one { |_op, _type, _ids| raise E } }.to raise_error(E)
     end
 
     it "reraises the exception" do
@@ -29,7 +29,7 @@ describe "::queue.process_one" do
 
   context "block returns false" do
     before do
-      @result = queue.process_one
+      @result = queue.process_one { |_op, _type, _ids| false }
     end
 
     it "returns false" do
@@ -51,7 +51,10 @@ describe "::queue.process_one" do
     items.update_all(failed_attempts: Postqueue::MAX_ATTEMPTS)
 
     @called_block = 0
-    @result = queue.process_one do
+    @result = queue.process_one do
+      @called_block += 1
+      false
+    end
   end
 
   it "does not call the block" do
data/spec/postqueue/process_spec.rb
ADDED
@@ -0,0 +1,80 @@
+require "spec_helper"
+
+describe "::queue.process" do
+  class Testqueue < Postqueue::Base
+    def batch_size(op:)
+      _ = op
+      10
+    end
+  end
+
+  let(:queue) { Testqueue.new }
+
+  describe "basics" do
+    before do
+      queue.enqueue op: "myop", entity_id: 12
+      queue.enqueue op: "myop", entity_id: 13
+      queue.enqueue op: "myop", entity_id: 14
+    end
+
+    it "processes the first entry" do
+      r = queue.process_one
+      expect(r).to eq(["myop", [12]])
+      expect(items.map(&:entity_id)).to contain_exactly(13, 14)
+    end
+
+    it "honors search conditions" do
+      queue.enqueue(op: "otherop", entity_id: 112)
+
+      r = queue.process_one(op: "otherop")
+      expect(r).to eq(["otherop", [112]])
+      expect(items.map(&:entity_id)).to contain_exactly(12, 13, 14)
+    end
+
+    it "yields a block and returns its return value" do
+      queue.enqueue op: "otherop", entity_id: 112
+      r = queue.process_one(op: "otherop") do |op, ids|
+        expect(op).to eq("otherop")
+        expect(ids).to eq([112])
+        "yihaa"
+      end
+
+      expect(r).to eq("yihaa")
+      expect(items.map(&:entity_id)).to contain_exactly(12, 13, 14)
+    end
+  end
+
+  context "when having entries with different entity_type and op" do
+    before do
+      queue.enqueue op: "myop", entity_id: 12
+      queue.enqueue op: "myop", entity_id: 13
+      queue.enqueue op: "otherop", entity_id: 14
+      queue.enqueue op: "myop", entity_id: 15
+      queue.enqueue op: "otherop", entity_id: 16
+    end
+
+    it "processes one entries" do
+      r = queue.process batch_size: 1
+      expect(r).to eq(["myop", [12]])
+      expect(items.map(&:entity_id)).to contain_exactly(13, 14, 15, 16)
+    end
+
+    it "processes two entries" do
+      r = queue.process batch_size: 2
+      expect(r).to eq(["myop", [12, 13]])
+      expect(items.map(&:entity_id)).to contain_exactly(14, 15, 16)
+    end
+
+    it "processes only matching entries when asked for more" do
+      r = queue.process
+      expect(r).to eq(["myop", [12, 13, 15]])
+      expect(items.map(&:entity_id)).to contain_exactly(14, 16)
+    end
+
+    it "honors search conditions" do
+      r = queue.process(op: "otherop")
+      expect(r).to eq(["otherop", [14, 16]])
+      expect(items.map(&:entity_id)).to contain_exactly(12, 13, 15)
+    end
+  end
+end
data/spec/spec_helper.rb
CHANGED
@@ -1,24 +1,22 @@
-path = File.expand_path(
+path = File.expand_path("../../mpx/lib", __FILE__)
 $LOAD_PATH.unshift(path) unless $LOAD_PATH.include?(path)
 
-ENV[
+ENV["RACK_ENV"] = "test"
 
-require
-require
-require
+require "rspec"
+require "pry"
+require "simplecov"
 
 SimpleCov.start do
   minimum_coverage 94
 end
 
-require
-require
-
-$logger = Logger.new(File.open("log/test.log", "a"))
+require "postqueue"
+require "./spec/support/configure_active_record"
 
 module Postqueue
   def self.logger
-
+    @logger ||= Logger.new(File.open("log/test.log", "a"))
   end
 end
 
@@ -28,10 +26,10 @@ end
 
 RSpec.configure do |config|
   config.run_all_when_everything_filtered = true
-  config.filter_run focus: (ENV[
+  config.filter_run focus: (ENV["CI"] != "true")
   config.expect_with(:rspec) { |c| c.syntax = :expect }
-  config.order =
+  config.order = "random"
 
-  config.before(:all) {
-  config.after {
+  config.before(:all) {}
+  config.after {}
 end
data/spec/support/configure_active_record.rb
CHANGED
@@ -1,15 +1,14 @@
-require
-require_relative './models'
+require "active_record"
 
 $LOAD_PATH << File.dirname(__FILE__)
 
-ActiveRecord::Base.establish_connection(adapter:
-                                        database:
-                                        username:
-                                        password:
+ActiveRecord::Base.establish_connection(adapter: "postgresql",
+                                        database: "postqueue_test",
+                                        username: "postqueue",
+                                        password: "postqueue")
 
-require_relative
-require_relative
+# require_relative "schema.rb"
+# require_relative "models.rb"
 
 Postqueue.unmigrate!
 Postqueue.migrate!
@@ -18,7 +17,7 @@ RSpec.configure do |config|
   config.around(:each) do |example|
     ActiveRecord::Base.connection.transaction do
       example.run
-      raise ActiveRecord::Rollback,
+      raise ActiveRecord::Rollback, "Clean up"
     end
   end
 end
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: postqueue
 version: !ruby/object:Gem::Version
-  version: 0.1.0
+  version: 0.2.1
 platform: ruby
 authors:
 - radiospiel
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2016-12-
+date: 2016-12-11 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rspec
@@ -108,6 +108,20 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: '0'
+- !ruby/object:Gem::Dependency
+  name: rubocop
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: pg
   requirement: !ruby/object:Gem::Requirement
@@ -132,19 +146,18 @@ files:
 - README.md
 - lib/postqueue.rb
 - lib/postqueue/base.rb
+- lib/postqueue/base/callback.rb
 - lib/postqueue/base/enqueue.rb
 - lib/postqueue/base/processing.rb
+- lib/postqueue/base/select_and_lock.rb
 - lib/postqueue/item.rb
 - lib/postqueue/version.rb
 - spec/postqueue/enqueue_spec.rb
-- spec/postqueue/idempotent_queue_spec.rb
 - spec/postqueue/postqueue_spec.rb
 - spec/postqueue/process_errors_spec.rb
-- spec/postqueue/
+- spec/postqueue/process_spec.rb
 - spec/spec_helper.rb
 - spec/support/configure_active_record.rb
-- spec/support/models.rb
-- spec/support/schema.rb
 homepage: https://github.com/radiospiel/postqueue
 licenses:
 - MIT
data/spec/postqueue/idempotent_queue_spec.rb
DELETED
@@ -1,89 +0,0 @@
-require 'spec_helper'
-
-describe "Idempotent queue" do
-  class Testqueue < Postqueue::Base
-    def idempotent?(entity_type:,op:)
-      true
-    end
-
-    def batch_size(entity_type:,op:)
-      100
-    end
-  end
-
-  let(:queue) { Testqueue.new }
-
-  context 'when having entries with the same entity_type and op' do
-    before do
-      queue.enqueue op: "myop", entity_type: "mytype", entity_id: 12
-      queue.enqueue op: "myop", entity_type: "mytype", entity_id: 13
-      queue.enqueue op: "myop", entity_type: "mytype", entity_id: 14
-    end
-
-    it "processes one entries" do
-      r = queue.process batch_size: 1
-      expect(r).to eq(["myop", "mytype", [12]])
-      expect(items.map(&:entity_id)).to contain_exactly(13, 14)
-    end
-
-    it "processes two entries" do
-      r = queue.process batch_size: 2
-      expect(r).to eq(["myop", "mytype", [12, 13]])
-      expect(items.map(&:entity_id)).to contain_exactly(14)
-    end
-
-    it "processes many entries" do
-      r = queue.process
-      expect(r).to eq(["myop", "mytype", [12, 13, 14]])
-      expect(items.map(&:entity_id)).to contain_exactly()
-    end
-  end
-
-  context 'when having entries with different entity_type and op' do
-    before do
-      queue.enqueue op: "myop", entity_type: "mytype", entity_id: 12
-      queue.enqueue op: "myop", entity_type: "mytype", entity_id: 13
-      queue.enqueue op: "otherop", entity_type: "mytype", entity_id: 14
-      queue.enqueue op: "myop", entity_type: "othertype", entity_id: 15
-      queue.enqueue op: "otherop", entity_type: "othertype", entity_id: 16
-    end
-
-    it "processes one entries" do
-      r = queue.process batch_size: 1
-      expect(r).to eq(["myop", "mytype", [12]])
-      expect(items.map(&:entity_id)).to contain_exactly(13, 14, 15, 16)
-    end
-
-    it "processes two entries" do
-      r = queue.process batch_size: 2
-      expect(r).to eq(["myop", "mytype", [12, 13]])
-      expect(items.map(&:entity_id)).to contain_exactly(14, 15, 16)
-    end
-
-    it "processes only matching entries when asked for more" do
-      r = queue.process
-      expect(r).to eq(["myop", "mytype", [12, 13]])
-      expect(items.map(&:entity_id)).to contain_exactly(14, 15, 16)
-    end
-
-    it "honors search conditions" do
-      r = queue.process(op: "otherop")
-      expect(r).to eq(["otherop", "mytype", [14]])
-      expect(items.map(&:entity_id)).to contain_exactly(12, 13, 15, 16)
-    end
-  end
-
-  context 'when having duplicate entries' do
-    before do
-      queue.enqueue op: "myop", entity_type: "mytype", entity_id: 12
-      queue.enqueue op: "myop", entity_type: "mytype", entity_id: 13
-      queue.enqueue op: "myop", entity_type: "mytype", entity_id: 12
-    end
-
-    it "removes duplicates from the queue" do
-      r = queue.process batch_size: 1
-      expect(r).to eq(["myop", "mytype", [12]])
-      expect(items.map(&:entity_id)).to contain_exactly(13)
-    end
-  end
-end
data/spec/postqueue/process_one_spec.rb
DELETED
@@ -1,40 +0,0 @@
-require 'spec_helper'
-
-describe "::queue.process_one" do
-  let(:queue) { Postqueue.new }
-
-  before do
-    queue.enqueue op: "myop", entity_type: "mytype", entity_id: 12
-    queue.enqueue op: "myop", entity_type: "mytype", entity_id: 13
-    queue.enqueue op: "myop", entity_type: "mytype", entity_id: 14
-  end
-
-  let(:processor) { Postqueue::Processor.new }
-
-  it "processes one entry" do
-    r = queue.process_one
-    expect(r).to eq(["myop", "mytype", [12]])
-    expect(items.map(&:entity_id)).to contain_exactly(13, 14)
-  end
-
-  it "honors search conditions" do
-    queue.enqueue op: "otherop", entity_type: "mytype", entity_id: 112
-
-    r = queue.process_one(op: "otherop")
-    expect(r).to eq(["otherop", "mytype", [112]])
-    expect(items.map(&:entity_id)).to contain_exactly(12, 13, 14)
-  end
-
-  it "yields a block and returns it" do
-    queue.enqueue op: "otherop", entity_type: "mytype", entity_id: 112
-    r = queue.process_one(op: "otherop") do |op, type, ids|
-      expect(op).to eq("otherop")
-      expect(type).to eq("mytype")
-      expect(ids).to eq([112])
-      "yihaa"
-    end
-
-    expect(r).to eq("yihaa")
-    expect(items.map(&:entity_id)).to contain_exactly(12, 13, 14)
-  end
-end
data/spec/support/models.rb
DELETED
@@ -1,123 +0,0 @@
-__END__
-
-require 'ostruct'
-
-class MockAssocationInfo < OpenStruct
-  def virtual?; mode == :virtual; end
-  def belongs_to?; mode == :belongs_to; end
-  def habtm?; mode == :has_and_belongs_to_many; end
-end
-
-module AnalyticsReflectionStub
-  attr :analytics_reflection
-  def set_analytics_reflection(hsh)
-    @analytics_reflection = OpenStruct.new(hsh)
-  end
-
-  attr :analytics_parent
-  def set_analytics_parent(analytics_parent)
-    @analytics_parent = analytics_parent
-  end
-end
-
-class Unicorn < ActiveRecord::Base
-  extend AnalyticsReflectionStub
-
-  def self.name_without_prefix
-    'UnicornWithoutPrefix'
-  end
-
-  validates_presence_of :name
-
-  set_analytics_reflection associations_by_foreign_keys: {}, analytics_keys: [:name]
-end
-
-class Manticore < ActiveRecord::Base
-  def self.name_without_prefix
-    'ManticoreWithoutPrefix'
-  end
-end
-
-class Rider < ActiveRecord::Base
-end
-
-class Foal < ActiveRecord::Base
-  extend AnalyticsReflectionStub
-
-  belongs_to :parent, class_name: 'Unicorn'
-  has_and_belongs_to_many :riders, class_name: 'Rider'
-
-  set_analytics_reflection associations_by_foreign_keys:
-    {
-      parent_id: MockAssocationInfo.new(mode: :belongs_to, klass: Unicorn, name: :parent),
-      rider_ids: MockAssocationInfo.new(mode: :has_and_belongs_to_many, klass: Rider, name: :riders),
-      stable_ids: MockAssocationInfo.new(mode: :virtual, name: :stables)
-    },
-    analytics_keys: [:nick_name, :age, :parent]
-  set_analytics_parent :parent
-end
-
-class Pegasus < ActiveRecord::Base
-  extend AnalyticsReflectionStub
-
-  belongs_to :parent, class_name: 'Foal'
-
-  set_analytics_reflection associations_by_foreign_keys: { :parent_id => OpenStruct.new(klass: Foal, name: :parent) },
-    analytics_keys: [:nick_name, :age, :parent]
-  set_analytics_parent :parent
-
-  attr_reader :affiliation_id
-end
-
-class Dragon < ActiveRecord::Base
-  def self.name_without_prefix
-    'DragonWithoutPrefix'
-  end
-
-  def describe
-    'yihaa'
-  end
-
-  extend AnalyticsReflectionStub
-end
-
-class Asset < ActiveRecord::Base
-  extend AnalyticsReflectionStub
-  attr_reader :affiliation_id
-end
-
-class Product < ActiveRecord::Base
-  extend AnalyticsReflectionStub
-  attr_reader :affiliation_id
-end
-
-class User < ActiveRecord::Base
-  extend AnalyticsReflectionStub
-  attr_reader :affiliation_id
-
-  def analytics_title
-    "my analytics_title"
-  end
-end
-
-class Grouping < ActiveRecord::Base
-  extend AnalyticsReflectionStub
-  attr_reader :affiliation_id
-end
-
-class Group < Grouping
-end
-
-class MockProductAsset < ActiveRecord::Base
-  has_one :asset
-  has_one :product
-
-  attr_accessor :asset, :product
-end
-
-class MockGroupUser < ActiveRecord::Base
-  has_one :group
-  has_one :user
-
-  attr_accessor :group, :user
-end
data/spec/support/schema.rb
DELETED
@@ -1,88 +0,0 @@
-__END__
-
-ActiveRecord::Schema.define do
-  self.verbose = false
-
-  create_table :unicorns, force: true do |t|
-    t.string :name, null: false
-    t.string :affiliation_id
-  end
-
-  create_table :foals, force: true do |t|
-    t.integer :parent_id, null: false
-    t.string :nick_name
-    t.integer :age
-    t.datetime :created_at
-  end
-
-  create_table :riders, force: true do |t|
-    t.string :nick_name
-  end
-
-  create_table :foal_riders, force: true do |t|
-    t.integer :rider_id
-    t.integer :foal_id
-  end
-
-  create_table :pegasus, force: true do |t|
-    t.integer :parent_id, null: false
-    t.string :nick_name
-    t.integer :age
-    t.datetime :created_at
-  end
-
-  create_table :mock_product_assets, force: true do |t|
-    t.integer :asset_id
-    t.string :access_level
-    t.integer :product_id
-  end
-
-  create_table :mock_group_users, force: true do |t|
-    t.integer :user_id
-    t.string :access_level
-    t.integer :group_id
-  end
-
-  create_table :assets, force: true
-
-  create_table :products, force: true
-
-  create_table :users, force: true do |t|
-    t.string :title
-  end
-
-  create_table :groupings, force: true
-
-  create_table :manticores, force: true do |t|
-    t.string :dummy_field
-  end
-
-  create_table :dragons, force: true do |t|
-    t.string :full_name
-  end
-
-  execute "INSERT INTO unicorns(affiliation_id, id, name) VALUES('mpx', 1, 'Faith')"
-  execute "INSERT INTO unicorns(affiliation_id, id, name) VALUES('mpx', 2, 'Faery')"
-  execute "INSERT INTO unicorns(affiliation_id, id, name) VALUES('mpx', 3, 'Yaser')"
-
-  execute "INSERT INTO foals(id, parent_id, nick_name, age, created_at) VALUES(1, 1, 'Little Faith', 12, 0)"
-  execute "INSERT INTO foals(id, parent_id, nick_name, age, created_at) VALUES(2, 1, 'Faith Nick', 9, 0)"
-
-  execute "INSERT INTO riders(id, nick_name) VALUES(1, 'Storm Rider')"
-  execute "INSERT INTO riders(id, nick_name) VALUES(2, 'Desert Rider')"
-
-  execute "INSERT INTO foal_riders(rider_id, foal_id) VALUES(1, 1)"
-  execute "INSERT INTO foal_riders(rider_id, foal_id) VALUES(2, 1)"
-
-  execute "INSERT INTO pegasus(parent_id, nick_name, age, created_at) VALUES(1, 'Derpy', 12, 0)"
-
-  execute "INSERT INTO dragons(full_name) VALUES('Chrysophylax Dives')"
-  execute "INSERT INTO dragons(full_name) VALUES('Nepomuk')"
-  execute "INSERT INTO dragons(full_name) VALUES('Smaug')"
-
-  execute "INSERT INTO manticores(dummy_field) VALUES('Dumb Manticore')"
-
-  execute "INSERT INTO users(id, title) VALUES(67, 'sixtyseven')"
-
-  execute "INSERT INTO groupings(id) VALUES(42)"
-end