gush 2.0.1 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/lib/gush/client.rb CHANGED
@@ -1,9 +1,22 @@
1
- require 'connection_pool'
1
+ require 'redis'
2
+ require 'concurrent-ruby'
2
3
 
3
4
  module Gush
4
5
  class Client
5
6
  attr_reader :configuration
6
7
 
8
+ @@redis_connection = Concurrent::ThreadLocalVar.new(nil)
9
+
10
+ def self.redis_connection(config)
11
+ cached = (@@redis_connection.value ||= { url: config.redis_url, connection: nil })
12
+ return cached[:connection] if !cached[:connection].nil? && config.redis_url == cached[:url]
13
+
14
+ Redis.new(url: config.redis_url).tap do |instance|
15
+ RedisClassy.redis = instance
16
+ @@redis_connection.value = { url: config.redis_url, connection: instance }
17
+ end
18
+ end
19
+
7
20
  def initialize(config = Gush.configuration)
8
21
  @configuration = config
9
22
  end
@@ -47,9 +60,7 @@ module Gush
47
60
 
48
61
  loop do
49
62
  job_id = SecureRandom.uuid
50
- available = connection_pool.with do |redis|
51
- !redis.hexists("gush.jobs.#{workflow_id}.#{job_klass}", job_id)
52
- end
63
+ available = !redis.hexists("gush.jobs.#{workflow_id}.#{job_klass}", job_id)
53
64
 
54
65
  break if available
55
66
  end
@@ -61,9 +72,7 @@ module Gush
61
72
  id = nil
62
73
  loop do
63
74
  id = SecureRandom.uuid
64
- available = connection_pool.with do |redis|
65
- !redis.exists("gush.workflow.#{id}")
66
- end
75
+ available = !redis.exists?("gush.workflow.#{id}")
67
76
 
68
77
  break if available
69
78
  end
@@ -72,37 +81,31 @@ module Gush
72
81
  end
73
82
 
74
83
  def all_workflows
75
- connection_pool.with do |redis|
76
- redis.scan_each(match: "gush.workflows.*").map do |key|
77
- id = key.sub("gush.workflows.", "")
78
- find_workflow(id)
79
- end
84
+ redis.scan_each(match: "gush.workflows.*").map do |key|
85
+ id = key.sub("gush.workflows.", "")
86
+ find_workflow(id)
80
87
  end
81
88
  end
82
89
 
83
90
  def find_workflow(id)
84
- connection_pool.with do |redis|
85
- data = redis.get("gush.workflows.#{id}")
91
+ data = redis.get("gush.workflows.#{id}")
86
92
 
87
- unless data.nil?
88
- hash = Gush::JSON.decode(data, symbolize_keys: true)
89
- keys = redis.scan_each(match: "gush.jobs.#{id}.*")
93
+ unless data.nil?
94
+ hash = Gush::JSON.decode(data, symbolize_keys: true)
95
+ keys = redis.scan_each(match: "gush.jobs.#{id}.*")
90
96
 
91
- nodes = keys.each_with_object([]) do |key, array|
92
- array.concat redis.hvals(key).map { |json| Gush::JSON.decode(json, symbolize_keys: true) }
93
- end
94
-
95
- workflow_from_hash(hash, nodes)
96
- else
97
- raise WorkflowNotFound.new("Workflow with given id doesn't exist")
97
+ nodes = keys.each_with_object([]) do |key, array|
98
+ array.concat redis.hvals(key).map { |json| Gush::JSON.decode(json, symbolize_keys: true) }
98
99
  end
100
+
101
+ workflow_from_hash(hash, nodes)
102
+ else
103
+ raise WorkflowNotFound.new("Workflow with given id doesn't exist")
99
104
  end
100
105
  end
101
106
 
102
107
  def persist_workflow(workflow)
103
- connection_pool.with do |redis|
104
- redis.set("gush.workflows.#{workflow.id}", workflow.to_json)
105
- end
108
+ redis.set("gush.workflows.#{workflow.id}", workflow.to_json)
106
109
 
107
110
  workflow.jobs.each {|job| persist_job(workflow.id, job) }
108
111
  workflow.mark_as_persisted
@@ -111,9 +114,7 @@ module Gush
111
114
  end
112
115
 
113
116
  def persist_job(workflow_id, job)
114
- connection_pool.with do |redis|
115
- redis.hset("gush.jobs.#{workflow_id}.#{job.klass}", job.id, job.to_json)
116
- end
117
+ redis.hset("gush.jobs.#{workflow_id}.#{job.klass}", job.id, job.to_json)
117
118
  end
118
119
 
119
120
  def find_job(workflow_id, job_name)
@@ -132,31 +133,23 @@ module Gush
132
133
  end
133
134
 
134
135
  def destroy_workflow(workflow)
135
- connection_pool.with do |redis|
136
- redis.del("gush.workflows.#{workflow.id}")
137
- end
136
+ redis.del("gush.workflows.#{workflow.id}")
138
137
  workflow.jobs.each {|job| destroy_job(workflow.id, job) }
139
138
  end
140
139
 
141
140
  def destroy_job(workflow_id, job)
142
- connection_pool.with do |redis|
143
- redis.del("gush.jobs.#{workflow_id}.#{job.klass}")
144
- end
141
+ redis.del("gush.jobs.#{workflow_id}.#{job.klass}")
145
142
  end
146
143
 
147
144
  def expire_workflow(workflow, ttl=nil)
148
145
  ttl = ttl || configuration.ttl
149
- connection_pool.with do |redis|
150
- redis.expire("gush.workflows.#{workflow.id}", ttl)
151
- end
146
+ redis.expire("gush.workflows.#{workflow.id}", ttl)
152
147
  workflow.jobs.each {|job| expire_job(workflow.id, job, ttl) }
153
148
  end
154
149
 
155
150
  def expire_job(workflow_id, job, ttl=nil)
156
151
  ttl = ttl || configuration.ttl
157
- connection_pool.with do |redis|
158
- redis.expire("gush.jobs.#{workflow_id}.#{job.name}", ttl)
159
- end
152
+ redis.expire("gush.jobs.#{workflow_id}.#{job.klass}", ttl)
160
153
  end
161
154
 
162
155
  def enqueue_job(workflow_id, job)
@@ -172,16 +165,11 @@ module Gush
172
165
  def find_job_by_klass_and_id(workflow_id, job_name)
173
166
  job_klass, job_id = job_name.split('|')
174
167
 
175
- connection_pool.with do |redis|
176
- redis.hget("gush.jobs.#{workflow_id}.#{job_klass}", job_id)
177
- end
168
+ redis.hget("gush.jobs.#{workflow_id}.#{job_klass}", job_id)
178
169
  end
179
170
 
180
171
  def find_job_by_klass(workflow_id, job_name)
181
- new_cursor, result = connection_pool.with do |redis|
182
- redis.hscan("gush.jobs.#{workflow_id}.#{job_name}", 0, count: 1)
183
- end
184
-
172
+ new_cursor, result = redis.hscan("gush.jobs.#{workflow_id}.#{job_name}", 0, count: 1)
185
173
  return nil if result.empty?
186
174
 
187
175
  job_id, job = *result[0]
@@ -202,14 +190,8 @@ module Gush
202
190
  flow
203
191
  end
204
192
 
205
- def build_redis
206
- Redis.new(url: configuration.redis_url).tap do |instance|
207
- RedisClassy.redis = instance
208
- end
209
- end
210
-
211
- def connection_pool
212
- @connection_pool ||= ConnectionPool.new(size: configuration.concurrency, timeout: 1) { build_redis }
193
+ def redis
194
+ self.class.redis_connection(configuration)
213
195
  end
214
196
  end
215
197
  end
@@ -1,17 +1,19 @@
1
1
  module Gush
2
2
  class Configuration
3
- attr_accessor :concurrency, :namespace, :redis_url, :ttl
3
+ attr_accessor :concurrency, :namespace, :redis_url, :ttl, :locking_duration, :polling_interval
4
4
 
5
5
  def self.from_json(json)
6
6
  new(Gush::JSON.decode(json, symbolize_keys: true))
7
7
  end
8
8
 
9
9
  def initialize(hash = {})
10
- self.concurrency = hash.fetch(:concurrency, 5)
11
- self.namespace = hash.fetch(:namespace, 'gush')
12
- self.redis_url = hash.fetch(:redis_url, 'redis://localhost:6379')
13
- self.gushfile = hash.fetch(:gushfile, 'Gushfile')
14
- self.ttl = hash.fetch(:ttl, -1)
10
+ self.concurrency = hash.fetch(:concurrency, 5)
11
+ self.namespace = hash.fetch(:namespace, 'gush')
12
+ self.redis_url = hash.fetch(:redis_url, 'redis://localhost:6379')
13
+ self.gushfile = hash.fetch(:gushfile, 'Gushfile')
14
+ self.ttl = hash.fetch(:ttl, -1)
15
+ self.locking_duration = hash.fetch(:locking_duration, 2) # how long you want to wait for the lock to be released, in seconds
16
+ self.polling_interval = hash.fetch(:polling_interval, 0.3) # how long the polling interval should be, in seconds
15
17
  end
16
18
 
17
19
  def gushfile=(path)
@@ -24,10 +26,12 @@ module Gush
24
26
 
25
27
  def to_hash
26
28
  {
27
- concurrency: concurrency,
28
- namespace: namespace,
29
- redis_url: redis_url,
30
- ttl: ttl
29
+ concurrency: concurrency,
30
+ namespace: namespace,
31
+ redis_url: redis_url,
32
+ ttl: ttl,
33
+ locking_duration: locking_duration,
34
+ polling_interval: polling_interval
31
35
  }
32
36
  end
33
37
 
data/lib/gush/graph.rb CHANGED
@@ -1,6 +1,10 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'tmpdir'
4
+
1
5
  module Gush
2
6
  class Graph
3
- attr_reader :workflow, :filename, :path, :start, :end_node
7
+ attr_reader :workflow, :filename, :path, :start_node, :end_node
4
8
 
5
9
  def initialize(workflow, options = {})
6
10
  @workflow = workflow
@@ -9,19 +13,26 @@ module Gush
9
13
  end
10
14
 
11
15
  def viz
12
- GraphViz.new(:G, graph_options) do |graph|
13
- set_node_options!(graph)
14
- set_edge_options!(graph)
16
+ @graph = Graphviz::Graph.new(**graph_options)
17
+ @start_node = add_node('start', shape: 'diamond', fillcolor: '#CFF09E')
18
+ @end_node = add_node('end', shape: 'diamond', fillcolor: '#F56991')
15
19
 
16
- @start = graph.start(shape: 'diamond', fillcolor: '#CFF09E')
17
- @end_node = graph.end(shape: 'diamond', fillcolor: '#F56991')
18
-
19
- workflow.jobs.each do |job|
20
- add_job(graph, job)
21
- end
20
+ # First, create nodes for all jobs
21
+ @job_name_to_node_map = {}
22
+ workflow.jobs.each do |job|
23
+ add_job_node(job)
24
+ end
22
25
 
23
- graph.output(png: path)
26
+ # Next, link up the jobs with edges
27
+ workflow.jobs.each do |job|
28
+ link_job_edges(job)
24
29
  end
30
+
31
+ format = 'png'
32
+ file_format = path.split('.')[-1]
33
+ format = file_format if file_format.length == 3
34
+
35
+ Graphviz::output(@graph, path: path, format: format)
25
36
  end
26
37
 
27
38
  def path
@@ -29,43 +40,43 @@ module Gush
29
40
  end
30
41
 
31
42
  private
32
- def add_job(graph, job)
33
- name = job.class.to_s
34
- graph.add_nodes(job.name, label: name)
43
+
44
+ def add_node(name, **specific_options)
45
+ @graph.add_node(name, **node_options.merge(specific_options))
46
+ end
47
+
48
+ def add_job_node(job)
49
+ @job_name_to_node_map[job.name] = add_node(job.name, label: node_label_for_job(job))
50
+ end
51
+
52
+ def link_job_edges(job)
53
+ job_node = @job_name_to_node_map[job.name]
35
54
 
36
55
  if job.incoming.empty?
37
- graph.add_edges(start, job.name)
56
+ @start_node.connect(job_node, **edge_options)
38
57
  end
39
58
 
40
59
  if job.outgoing.empty?
41
- graph.add_edges(job.name, end_node)
60
+ job_node.connect(@end_node, **edge_options)
42
61
  else
43
62
  job.outgoing.each do |id|
44
63
  outgoing_job = workflow.find_job(id)
45
- graph.add_edges(job.name, outgoing_job.name)
64
+ job_node.connect(@job_name_to_node_map[outgoing_job.name], **edge_options)
46
65
  end
47
66
  end
48
67
  end
49
68
 
50
- def set_node_options!(graph)
51
- node_options.each do |key, value|
52
- graph.node[key] = value
53
- end
54
- end
55
-
56
- def set_edge_options!(graph)
57
- edge_options.each do |key, value|
58
- graph.edge[key] = value
59
- end
69
+ def node_label_for_job(job)
70
+ job.class.to_s
60
71
  end
61
72
 
62
73
  def graph_options
63
74
  {
64
- type: :digraph,
65
- dpi: 200,
66
- compound: true,
67
- rankdir: "LR",
68
- center: true
75
+ dpi: 200,
76
+ compound: true,
77
+ rankdir: "LR",
78
+ center: true,
79
+ format: 'png'
69
80
  }
70
81
  end
71
82
 
data/lib/gush/worker.rb CHANGED
@@ -6,6 +6,12 @@ module Gush
6
6
  def perform(workflow_id, job_id)
7
7
  setup_job(workflow_id, job_id)
8
8
 
9
+ if job.succeeded?
10
+ # Try to enqueue outgoing jobs again because the previous attempt failed with a Redis mutex lock error
11
+ enqueue_outgoing_jobs
12
+ return
13
+ end
14
+
9
15
  job.payloads = incoming_payloads
10
16
 
11
17
  error = nil
@@ -24,12 +30,16 @@ module Gush
24
30
 
25
31
  private
26
32
 
27
- attr_reader :client, :workflow_id, :job
33
+ attr_reader :client, :workflow_id, :job, :configuration
28
34
 
29
35
  def client
30
36
  @client ||= Gush::Client.new(Gush.configuration)
31
37
  end
32
38
 
39
+ def configuration
40
+ @configuration ||= client.configuration
41
+ end
42
+
33
43
  def setup_job(workflow_id, job_id)
34
44
  @workflow_id = workflow_id
35
45
  @job ||= client.find_job(workflow_id, job_id)
@@ -67,7 +77,11 @@ module Gush
67
77
 
68
78
  def enqueue_outgoing_jobs
69
79
  job.outgoing.each do |job_name|
70
- RedisMutex.with_lock("gush_enqueue_outgoing_jobs_#{workflow_id}-#{job_name}", sleep: 0.3, block: 2) do
80
+ RedisMutex.with_lock(
81
+ "gush_enqueue_outgoing_jobs_#{workflow_id}-#{job_name}",
82
+ sleep: configuration.polling_interval,
83
+ block: configuration.locking_duration
84
+ ) do
71
85
  out = client.find_job(workflow_id, job_name)
72
86
 
73
87
  if out.ready_to_start?
@@ -75,6 +89,8 @@ module Gush
75
89
  end
76
90
  end
77
91
  end
92
+ rescue RedisMutex::LockError
93
+ Worker.set(wait: 2.seconds).perform_later(workflow_id, job.name)
78
94
  end
79
95
  end
80
96
  end
@@ -152,17 +152,15 @@ describe "Workflows" do
152
152
  flow = PayloadWorkflow.create
153
153
  flow.start!
154
154
 
155
- perform_one
156
- expect(flow.reload.find_job(flow.jobs[0].name).output_payload).to eq('first')
155
+ 3.times { perform_one }
157
156
 
158
- perform_one
159
- expect(flow.reload.find_job(flow.jobs[1].name).output_payload).to eq('second')
157
+ outputs = flow.reload.jobs.select { |j| j.klass == 'RepetitiveJob' }.map { |j| j.output_payload }
158
+ expect(outputs).to match_array(['first', 'second', 'third'])
160
159
 
161
160
  perform_one
162
- expect(flow.reload.find_job(flow.jobs[2].name).output_payload).to eq('third')
163
161
 
164
- perform_one
165
- expect(flow.reload.find_job(flow.jobs[3].name).output_payload).to eq(%w(first second third))
162
+ summary_job = flow.reload.jobs.find { |j| j.klass == 'SummaryJob' }
163
+ expect(summary_job.output_payload).to eq(%w(first second third))
166
164
  end
167
165
 
168
166
  it "does not execute `configure` on each job for huge workflows" do
@@ -95,12 +95,18 @@ describe Gush::Client do
95
95
  end
96
96
 
97
97
  describe "#expire_workflow" do
98
+ let(:ttl) { 2000 }
99
+
98
100
  it "sets TTL for all Redis keys related to the workflow" do
99
101
  workflow = TestWorkflow.create
100
102
 
101
- client.expire_workflow(workflow, -1)
103
+ client.expire_workflow(workflow, ttl)
104
+
105
+ expect(redis.ttl("gush.workflows.#{workflow.id}")).to eq(ttl)
102
106
 
103
- # => TODO - I believe fakeredis does not handle TTL the same.
107
+ workflow.jobs.each do |job|
108
+ expect(redis.ttl("gush.jobs.#{workflow.id}.#{job.klass}")).to eq(ttl)
109
+ end
104
110
  end
105
111
  end
106
112
 
@@ -8,6 +8,8 @@ describe Gush::Configuration do
8
8
  expect(subject.concurrency).to eq(5)
9
9
  expect(subject.namespace).to eq('gush')
10
10
  expect(subject.gushfile).to eq(GUSHFILE.realpath)
11
+ expect(subject.locking_duration).to eq(2)
12
+ expect(subject.polling_interval).to eq(0.3)
11
13
  end
12
14
 
13
15
  describe "#configure" do
@@ -15,10 +17,14 @@ describe Gush::Configuration do
15
17
  Gush.configure do |config|
16
18
  config.redis_url = "redis://localhost"
17
19
  config.concurrency = 25
20
+ config.locking_duration = 5
21
+ config.polling_interval = 0.5
18
22
  end
19
23
 
20
24
  expect(Gush.configuration.redis_url).to eq("redis://localhost")
21
25
  expect(Gush.configuration.concurrency).to eq(25)
26
+ expect(Gush.configuration.locking_duration).to eq(5)
27
+ expect(Gush.configuration.polling_interval).to eq(0.5)
22
28
  end
23
29
  end
24
30
  end
@@ -10,26 +10,43 @@ describe Gush::Graph do
10
10
  edge = double("edge", :[]= => true)
11
11
  graph = double("graph", node: node, edge: edge)
12
12
  path = Pathname.new(Dir.tmpdir).join(filename)
13
- expect(graph).to receive(:start).with(shape: 'diamond', fillcolor: '#CFF09E')
14
- expect(graph).to receive(:end).with(shape: 'diamond', fillcolor: '#F56991')
15
-
16
- expect(graph).to receive(:output).with(png: path.to_s)
17
-
18
- expect(graph).to receive(:add_nodes).with(/Prepare/, label: "Prepare")
19
- expect(graph).to receive(:add_nodes).with(/FetchFirstJob/, label: "FetchFirstJob")
20
- expect(graph).to receive(:add_nodes).with(/FetchSecondJob/, label: "FetchSecondJob")
21
- expect(graph).to receive(:add_nodes).with(/NormalizeJob/, label: "NormalizeJob")
22
- expect(graph).to receive(:add_nodes).with(/PersistFirstJob/, label: "PersistFirstJob")
23
-
24
- expect(graph).to receive(:add_edges).with(nil, /Prepare/)
25
- expect(graph).to receive(:add_edges).with(/Prepare/, /FetchFirstJob/)
26
- expect(graph).to receive(:add_edges).with(/Prepare/, /FetchSecondJob/)
27
- expect(graph).to receive(:add_edges).with(/FetchFirstJob/, /PersistFirstJob/)
28
- expect(graph).to receive(:add_edges).with(/FetchSecondJob/, /NormalizeJob/)
29
- expect(graph).to receive(:add_edges).with(/PersistFirstJob/, /NormalizeJob/)
30
- expect(graph).to receive(:add_edges).with(/NormalizeJob/, nil)
31
-
32
- expect(GraphViz).to receive(:new).and_yield(graph)
13
+
14
+ expect(Graphviz::Graph).to receive(:new).and_return(graph)
15
+
16
+ node_start = double('start')
17
+ node_end = double('end')
18
+ node_prepare = double('Prepare')
19
+ node_fetch_first_job = double('FetchFirstJob')
20
+ node_fetch_second_job = double('FetchSecondJob')
21
+ node_normalize_job = double('NormalizeJob')
22
+ node_persist_first_job = double('PersistFirstJob')
23
+
24
+ expect(graph).to receive(:add_node).with('start', {shape: 'diamond', fillcolor: '#CFF09E', color: "#555555", style: 'filled'}).and_return(node_start)
25
+ expect(graph).to receive(:add_node).with('end', {shape: 'diamond', fillcolor: '#F56991', color: "#555555", style: 'filled'}).and_return(node_end)
26
+
27
+ standard_options = {:color=>"#555555", :fillcolor=>"white", :label=>"Prepare", :shape=>"ellipse", :style=>"filled"}
28
+
29
+ expect(graph).to receive(:add_node).with(/Prepare/, standard_options.merge(label: "Prepare")).and_return(node_prepare)
30
+ expect(graph).to receive(:add_node).with(/FetchFirstJob/, standard_options.merge(label: "FetchFirstJob")).and_return(node_fetch_first_job)
31
+ expect(graph).to receive(:add_node).with(/FetchSecondJob/, standard_options.merge(label: "FetchSecondJob")).and_return(node_fetch_second_job)
32
+ expect(graph).to receive(:add_node).with(/NormalizeJob/, standard_options.merge(label: "NormalizeJob")).and_return(node_normalize_job)
33
+ expect(graph).to receive(:add_node).with(/PersistFirstJob/, standard_options.merge(label: "PersistFirstJob")).and_return(node_persist_first_job)
34
+
35
+ edge_options = {
36
+ dir: "forward",
37
+ penwidth: 1,
38
+ color: "#555555"
39
+ }
40
+
41
+ expect(node_start).to receive(:connect).with(node_prepare, **edge_options)
42
+ expect(node_prepare).to receive(:connect).with(node_fetch_first_job, **edge_options)
43
+ expect(node_prepare).to receive(:connect).with(node_fetch_second_job, **edge_options)
44
+ expect(node_fetch_first_job).to receive(:connect).with(node_persist_first_job, **edge_options)
45
+ expect(node_fetch_second_job).to receive(:connect).with(node_normalize_job, **edge_options)
46
+ expect(node_persist_first_job).to receive(:connect).with(node_normalize_job, **edge_options)
47
+ expect(node_normalize_job).to receive(:connect).with(node_end, **edge_options)
48
+
49
+ expect(graph).to receive(:dump_graph).and_return(nil)
33
50
 
34
51
  subject.viz
35
52
  end
@@ -4,6 +4,8 @@ describe Gush::Worker do
4
4
  subject { described_class.new }
5
5
 
6
6
  let!(:workflow) { TestWorkflow.create }
7
+ let(:locking_duration) { 5 }
8
+ let(:polling_interval) { 0.5 }
7
9
  let!(:job) { client.find_job(workflow.id, "Prepare") }
8
10
  let(:config) { Gush.configuration.to_json }
9
11
  let!(:client) { Gush::Client.new }
@@ -39,6 +41,18 @@ describe Gush::Worker do
39
41
  end
40
42
  end
41
43
 
44
+ context 'when job failed to enqueue outgoing jobs' do
45
+ it 'enqueues another job to handle enqueue_outgoing_jobs' do
46
+ allow(RedisMutex).to receive(:with_lock).and_raise(RedisMutex::LockError)
47
+ subject.perform(workflow.id, 'Prepare')
48
+ expect(Gush::Worker).to have_no_jobs(workflow.id, jobs_with_id(["FetchFirstJob", "FetchSecondJob"]))
49
+
50
+ allow(RedisMutex).to receive(:with_lock).and_call_original
51
+ perform_one
52
+ expect(Gush::Worker).to have_jobs(workflow.id, jobs_with_id(["FetchFirstJob", "FetchSecondJob"]))
53
+ end
54
+ end
55
+
42
56
  it "calls job.perform method" do
43
57
  SPY = double()
44
58
  expect(SPY).to receive(:some_method)
@@ -59,5 +73,11 @@ describe Gush::Worker do
59
73
 
60
74
  subject.perform(workflow.id, 'OkayJob')
61
75
  end
76
+
77
+ it 'calls RedisMutex.with_lock with customizable locking_duration and polling_interval' do
78
+ expect(RedisMutex).to receive(:with_lock)
79
+ .with(anything, block: 5, sleep: 0.5).twice
80
+ subject.perform(workflow.id, 'Prepare')
81
+ end
62
82
  end
63
83
  end
data/spec/spec_helper.rb CHANGED
@@ -1,5 +1,4 @@
1
1
  require 'gush'
2
- require 'fakeredis'
3
2
  require 'json'
4
3
  require 'pry'
5
4
 
@@ -35,12 +34,8 @@ class ParameterTestWorkflow < Gush::Workflow
35
34
  end
36
35
  end
37
36
 
38
- class Redis
39
- def publish(*)
40
- end
41
- end
42
37
 
43
- REDIS_URL = "redis://localhost:6379/12"
38
+ REDIS_URL = ENV["REDIS_URL"] || "redis://localhost:6379/12"
44
39
 
45
40
  module GushHelpers
46
41
  def redis
@@ -78,6 +73,19 @@ RSpec::Matchers.define :have_jobs do |flow, jobs|
78
73
  end
79
74
  end
80
75
 
76
+ RSpec::Matchers.define :have_no_jobs do |flow, jobs|
77
+ match do |actual|
78
+ expected = jobs.map do |job|
79
+ hash_including(args: include(flow, job))
80
+ end
81
+ expect(ActiveJob::Base.queue_adapter.enqueued_jobs).not_to match_array(expected)
82
+ end
83
+
84
+ failure_message do |actual|
85
+ "expected queue to have no #{jobs}, but instead has: #{ActiveJob::Base.queue_adapter.enqueued_jobs.map{ |j| j[:args][1]}}"
86
+ end
87
+ end
88
+
81
89
  RSpec.configure do |config|
82
90
  config.include ActiveJob::TestHelper
83
91
  config.include GushHelpers
@@ -91,12 +99,13 @@ RSpec.configure do |config|
91
99
  clear_performed_jobs
92
100
 
93
101
  Gush.configure do |config|
94
- config.redis_url = REDIS_URL
95
- config.gushfile = GUSHFILE
102
+ config.redis_url = REDIS_URL
103
+ config.gushfile = GUSHFILE
104
+ config.locking_duration = defined?(locking_duration) ? locking_duration : 2
105
+ config.polling_interval = defined?(polling_interval) ? polling_interval : 0.3
96
106
  end
97
107
  end
98
108
 
99
-
100
109
  config.after(:each) do
101
110
  clear_enqueued_jobs
102
111
  clear_performed_jobs