dynflow 1.4.0 → 1.4.5
This diff compares the publicly released contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/.travis.yml +2 -0
- data/dynflow.gemspec +2 -1
- data/lib/dynflow/connectors/abstract.rb +4 -0
- data/lib/dynflow/connectors/database.rb +4 -0
- data/lib/dynflow/connectors/direct.rb +5 -0
- data/lib/dynflow/executors.rb +32 -10
- data/lib/dynflow/executors/sidekiq/orchestrator_jobs.rb +1 -1
- data/lib/dynflow/persistence.rb +8 -0
- data/lib/dynflow/persistence_adapters/abstract.rb +16 -0
- data/lib/dynflow/persistence_adapters/sequel.rb +23 -7
- data/lib/dynflow/persistence_adapters/sequel_migrations/001_initial.rb +2 -1
- data/lib/dynflow/persistence_adapters/sequel_migrations/020_drop_duplicate_indices.rb +30 -0
- data/lib/dynflow/rails/configuration.rb +12 -5
- data/lib/dynflow/version.rb +1 -1
- data/lib/dynflow/world/invalidation.rb +4 -0
- data/test/persistence_test.rb +36 -0
- data/test/redis_locking_test.rb +92 -0
- metadata +21 -4
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 531c1207ec6665f5773b2e71887e79d0259c4edd068d976f655766b5646ca9b0
+  data.tar.gz: ae88bf9434ebc2b7672751e24b95a60d1c4a38c1ff9396e68342c0529b68022c
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 3f2f5f4ccdc36b9bc0439fc0a864412126877f4da6f01eaa00b6bcadf6af05d0467d1eb1e6fde002ee35d962ea04f3c46df0e369b32e17c56fca9e26450ab1be
+  data.tar.gz: ac3c24d8f31492c3d053877a03162e54c0c2f2b7ebe3673cb4cb6134f2cc0ea83be6f54ea4251d4f801e2dacc4f90c6cfe16b2ffd818e0f46a573da2fe054189
data/.travis.yml
CHANGED

@@ -2,6 +2,7 @@ language: ruby
 
 services:
   - postgresql
+  - redis
 
 rvm:
   - "2.3.1"
@@ -18,6 +19,7 @@ matrix:
       env: "DB=mysql DB_CONN_STRING=mysql2://root@localhost/travis_ci_test"
       services:
         - mysql
+        - redis
     - rvm: "2.4.0"
       env: "DB=sqlite3 DB_CONN_STRING=sqlite:/"
     - rvm: "2.4.0"
data/dynflow.gemspec
CHANGED

@@ -23,13 +23,14 @@ Gem::Specification.new do |s|
   s.add_dependency "apipie-params"
   s.add_dependency "algebrick", '~> 0.7.0'
   s.add_dependency "concurrent-ruby", '~> 1.1.3'
-  s.add_dependency "concurrent-ruby-edge", '~> 0.
+  s.add_dependency "concurrent-ruby-edge", '~> 0.6.0'
   s.add_dependency "sequel", '>= 4.0.0'
 
   s.add_development_dependency "rake"
   s.add_development_dependency "rack-test"
   s.add_development_dependency "minitest"
   s.add_development_dependency "minitest-reporters"
+  s.add_development_dependency "minitest-stub-const"
   s.add_development_dependency "activerecord"
   s.add_development_dependency 'activejob'
   s.add_development_dependency "sqlite3"
data/lib/dynflow/connectors/abstract.rb
CHANGED

@@ -25,6 +25,10 @@ module Dynflow
         raise NotImplementedError
       end
 
+      def prune_undeliverable_envelopes(world)
+        raise NotImplementedError
+      end
+
       # we need to pass the world, as the connector can be shared
       # between worlds: we need to know the one to send the message to
       def receive(world, envelope)
data/lib/dynflow/connectors/database.rb
CHANGED

@@ -172,6 +172,10 @@ module Dynflow
        Telemetry.with_instance { |t| t.increment_counter(:dynflow_connector_envelopes, 1, :world => envelope.sender_id, :direction => 'outgoing') }
        @core.ask([:handle_envelope, envelope])
      end
+
+      def prune_undeliverable_envelopes(world)
+        world.persistence.prune_undeliverable_envelopes
+      end
    end
  end
end
data/lib/dynflow/connectors/direct.rb
CHANGED

@@ -68,6 +68,11 @@ module Dynflow
        Telemetry.with_instance { |t| t.increment_counter(:dynflow_connector_envelopes, 1, :world => envelope.sender_id) }
        @core.ask([:handle_envelope, envelope])
      end
+
+      def prune_undeliverable_envelopes(_world)
+        # This is a noop
+        0
+      end
    end
  end
end
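Taken together, the three connector changes add `prune_undeliverable_envelopes` to the connector contract: the abstract connector raises `NotImplementedError`, the database connector delegates to the world's persistence layer, and the direct (in-process) connector is a no-op that reports zero pruned envelopes. As a hedged illustration only, a call site might look like the sketch below; it assumes the world exposes its connector (as the invalidation code later in this diff does internally), and the logging line is purely illustrative.

```ruby
# Hypothetical call site; `world` is assumed to be a configured Dynflow::World.
# With the database connector this deletes orphaned rows from the envelope
# table; with the direct connector it is a no-op returning 0.
dropped = world.connector.prune_undeliverable_envelopes(world)
world.logger.info("dropped #{dropped} undeliverable envelopes") if dropped > 0
```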
data/lib/dynflow/executors.rb
CHANGED

@@ -4,16 +4,38 @@ module Dynflow
 
     require 'dynflow/executors/parallel'
 
-
-
-
-
-
-
-
-
-
-
+    class << self
+      # Every time we run a code that can be defined outside of Dynflow,
+      # we should wrap it with this method, and we can ensure here to do
+      # necessary cleanup, such as cleaning ActiveRecord connections
+      def run_user_code
+        # Here we cover a case where the connection was already checked out from
+        # the pool and had opened transactions. In that case, we should leave the
+        # cleanup to the other runtime unit which opened the transaction. If the
+        # connection was checked out or there are no opened transactions, we can
+        # safely perform the cleanup.
+        no_previously_opened_transactions = active_record_open_transactions.zero?
+        yield
+      ensure
+        ::ActiveRecord::Base.clear_active_connections! if no_previously_opened_transactions && active_record_connected?
+        ::Logging.mdc.clear if defined? ::Logging
+      end
+
+      private
+
+      def active_record_open_transactions
+        active_record_active_connection&.open_transactions || 0
+      end
 
+      def active_record_active_connection
+        return unless defined?(::ActiveRecord) && ::ActiveRecord::Base.connected?
+        # #active_connection? returns the connection if already established or nil
+        ::ActiveRecord::Base.connection_pool.active_connection?
+      end
+
+      def active_record_connected?
+        !!active_record_active_connection
+      end
+    end
   end
 end
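The `run_user_code` helper wraps any block of user-supplied code executed from inside the executor: after the block, ActiveRecord connections it checked out are returned to the pool (unless a transaction was already open before the block ran) and the Logging MDC is cleared. A hypothetical call, purely for illustration (`SomeUserAction` is not a real class):

```ruby
# Hypothetical illustration; SomeUserAction stands in for arbitrary user code.
Dynflow::Executors.run_user_code do
  SomeUserAction.new.run # may touch ActiveRecord and check out a connection
end
# After the block, clear_active_connections! has run (when safe to do so)
# and the Logging MDC has been cleared.
```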
data/lib/dynflow/executors/sidekiq/orchestrator_jobs.rb
CHANGED

@@ -13,7 +13,7 @@ module Dynflow
      def perform(work_item, delayed_events = nil)
        # Usually the step is saved on the worker's side. However if sidekiq is shut down,
        # then the step may not have been saved so we save it just to be sure
-        if work_item.is_a?(Director::StepWorkItem) && work_item.step&.error&.
+        if work_item.is_a?(Director::StepWorkItem) && work_item.step&.error&.exception_class == ::Sidekiq::Shutdown
          work_item.step.save
        end
        Dynflow.process_world.executor.core.tell([:work_finished, work_item, delayed_events])
data/lib/dynflow/persistence.rb
CHANGED

@@ -134,5 +134,13 @@ module Dynflow
        envelope
      end
    end
+
+    def prune_envelopes(receiver_ids)
+      adapter.prune_envelopes(receiver_ids)
+    end
+
+    def prune_undeliverable_envelopes
+      adapter.prune_undeliverable_envelopes
+    end
  end
end
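Both helpers simply delegate to the configured persistence adapter, so callers go through the world's persistence object rather than touching the adapter directly. A hedged usage sketch (the stale world id is a placeholder):

```ruby
# Hypothetical usage; `world` is assumed to be a configured Dynflow::World and
# stale_world_id the id of a world that has been invalidated.
stale_world_id = '1234'                             # placeholder id
world.persistence.prune_envelopes([stale_world_id]) # drop messages addressed to it
world.persistence.prune_undeliverable_envelopes     # drop messages for any unregistered world
```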
data/lib/dynflow/persistence_adapters/abstract.rb
CHANGED

@@ -116,6 +116,22 @@ module Dynflow
      def push_envelope(envelope)
        raise NotImplementedError
      end
+
+      def prune_envelopes(receiver_ids)
+        raise NotImplementedError
+      end
+
+      def prune_undeliverable_envelopes
+        raise NotImplementedError
+      end
+
+      def migrate_db
+        raise NotImplementedError
+      end
+
+      def abort_if_pending_migrations!
+        raise NotImplementedError
+      end
    end
  end
end
data/lib/dynflow/persistence_adapters/sequel.rb
CHANGED

@@ -45,13 +45,15 @@ module Dynflow
                      step: %w(error children) }

      def initialize(config)
+        migrate = true
        config = config.dup
        @additional_responsibilities = { coordinator: true, connector: true }
-        if config.is_a?(Hash)
-          @additional_responsibilities.merge!(config.delete(:additional_responsibilities))
+        if config.is_a?(Hash)
+          @additional_responsibilities.merge!(config.delete(:additional_responsibilities)) if config.key?(:additional_responsibilities)
+          migrate = config.fetch(:migrate, true)
        end
        @db = initialize_db config
-        migrate_db
+        migrate_db if migrate
      end

      def transaction(&block)
@@ -198,6 +200,16 @@ module Dynflow
        table(:envelope).insert(prepare_record(:envelope, envelope))
      end

+      def prune_envelopes(receiver_ids)
+        connector_feature!
+        table(:envelope).where(receiver_id: receiver_ids).delete
+      end
+
+      def prune_undeliverable_envelopes
+        connector_feature!
+        table(:envelope).where(receiver_id: table(:coordinator_record).select(:id)).invert.delete
+      end
+
      def coordinator_feature!
        unless @additional_responsibilities[:coordinator]
          raise "The sequel persistence adapter coordinator feature used but not enabled in additional_features"
@@ -238,6 +250,14 @@ module Dynflow
                  envelopes: table(:envelope).all.to_a }
      end

+      def migrate_db
+        ::Sequel::Migrator.run(db, self.class.migrations_path, table: 'dynflow_schema_info')
+      end
+
+      def abort_if_pending_migrations!
+        ::Sequel::Migrator.check_current(db, self.class.migrations_path, table: 'dynflow_schema_info')
+      end
+
      private

      TABLES = { execution_plan: :dynflow_execution_plans,
@@ -259,10 +279,6 @@ module Dynflow
        File.expand_path('../sequel_migrations', __FILE__)
      end

-      def migrate_db
-        ::Sequel::Migrator.run(db, self.class.migrations_path, table: 'dynflow_schema_info')
-      end
-
      def prepare_record(table_name, value, base = {}, with_data = true)
        record = base.dup
        if with_data && table(table_name).columns.include?(:data)
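With this change the Sequel adapter no longer migrates the database unconditionally on construction: passing `migrate: false` skips the automatic `Sequel::Migrator` run, and the now-public `migrate_db` and `abort_if_pending_migrations!` let the application decide when to migrate or merely verify the schema. A hedged sketch of how that might be used; the connection options are placeholders and the exact option-hash layout accepted by the adapter is an assumption, not something this diff shows.

```ruby
# Hedged sketch based on the diff above. The connection options are placeholders;
# the remaining hash is handed to ::Sequel.connect by the adapter.
adapter = Dynflow::PersistenceAdapters::Sequel.new(adapter: 'sqlite', migrate: false)

adapter.abort_if_pending_migrations!  # raises via Sequel::Migrator.check_current if the schema is behind
adapter.migrate_db                    # runs the bundled migrations explicitly
```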
data/lib/dynflow/persistence_adapters/sequel_migrations/001_initial.rb
CHANGED

@@ -32,7 +32,8 @@ Sequel.migration do
      primary_key [:execution_plan_uuid, :id]
      index [:execution_plan_uuid, :id], :unique => true
      column :action_id, Integer
-      foreign_key [:execution_plan_uuid, :action_id], :dynflow_actions
+      foreign_key [:execution_plan_uuid, :action_id], :dynflow_actions,
+                  name: :dynflow_steps_execution_plan_uuid_fkey1
      index [:execution_plan_uuid, :action_id]

      column :data, String, text: true
data/lib/dynflow/persistence_adapters/sequel_migrations/020_drop_duplicate_indices.rb
ADDED

@@ -0,0 +1,30 @@
+# frozen_string_literal: true
+Sequel.migration do
+  up do
+    alter_table(:dynflow_actions) do
+      drop_index [:execution_plan_uuid, :id]
+    end
+
+    alter_table(:dynflow_execution_plans) do
+      drop_index :uuid
+    end
+
+    alter_table(:dynflow_steps) do
+      drop_index [:execution_plan_uuid, :id]
+    end
+  end
+
+  down do
+    alter_table(:dynflow_actions) do
+      add_index [:execution_plan_uuid, :id], :unique => true
+    end
+
+    alter_table(:dynflow_execution_plans) do
+      add_index :uuid, :unique => true
+    end
+
+    alter_table(:dynflow_steps) do
+      add_index [:execution_plan_uuid, :id], :unique => true
+    end
+  end
+end
data/lib/dynflow/rails/configuration.rb
CHANGED

@@ -100,8 +100,15 @@ module Dynflow
      end

      def calculate_db_pool_size(world)
-        self.db_pool_size
-
+        return self.db_pool_size if self.db_pool_size
+
+        base_value = 5
+        if defined?(::Sidekiq)
+          Sidekiq.options[:concurrency] + base_value
+        else
+          world.config.queues.values.inject(base_value) do |pool_size, pool_options|
+            pool_size += pool_options[:pool_size]
+          end
        end
      end

@@ -168,7 +175,7 @@ module Dynflow
        if remote?
          false
        else
-          if defined?(::Sidekiq) &&
+          if defined?(::Sidekiq) && Sidekiq.options[:dynflow_executor]
            ::Dynflow::Executors::Sidekiq::Core
          else
            ::Dynflow::Executors::Parallel::Core
@@ -185,8 +192,8 @@ module Dynflow
      end

      # Sequel adapter based on Rails app database.yml configuration
-      def initialize_persistence(world)
-        persistence_class.new(default_sequel_adapter_options(world))
+      def initialize_persistence(world, options = {})
+        persistence_class.new(default_sequel_adapter_options(world).merge(options))
      end
    end
  end
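The new pool-size heuristic starts from a base of 5 connections and either adds the Sidekiq worker concurrency (when running under Sidekiq) or sums the configured pool sizes of the Dynflow queues. For example, with a Sidekiq concurrency of 25 the computed pool is 30; without Sidekiq, the hypothetical queue configuration below yields 5 + 5 + 10 = 20.

```ruby
# Hypothetical queue configuration, for illustration only.
queues = {
  default: { pool_size: 5 },
  hooks:   { pool_size: 10 }
}

base_value = 5
pool_size = queues.values.inject(base_value) { |sum, q| sum + q[:pool_size] }
pool_size # => 20

# Under Sidekiq the same heuristic would instead yield concurrency + 5,
# e.g. 25 + 5 = 30.
```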
data/lib/dynflow/version.rb
CHANGED

-  VERSION = '1.4.0'
+  VERSION = '1.4.5'
data/lib/dynflow/world/invalidation.rb
CHANGED

@@ -28,6 +28,8 @@ module Dynflow
        end
      end

+      pruned = persistence.prune_envelopes(world.id)
+      logger.error("Pruned #{pruned} envelopes for invalidated world #{world.id}") unless pruned.zero?
      coordinator.delete_world(world)
    end
  end
@@ -115,6 +117,8 @@ module Dynflow
    def perform_validity_checks
      world_invalidation_result = worlds_validity_check
      locks_validity_check
+      pruned = connector.prune_undeliverable_envelopes(self)
+      logger.error("Pruned #{pruned} undeliverable envelopes") unless pruned.zero?
      world_invalidation_result.values.select { |result| result == :invalidated }.size
    end
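In practice these hooks run as part of world invalidation and the periodic validity checks: invalidating a dead world now also deletes envelopes addressed to it, and `perform_validity_checks` additionally drops envelopes whose receiver is no longer registered with the coordinator. A hedged illustration (the world variables are assumed to exist and are not defined by this diff):

```ruby
# Hedged illustration; `world` is a running Dynflow::World and `dead_world`
# the coordinator record of an executor world that went away.
world.invalidate(dead_world)  # now also prunes envelopes addressed to dead_world
world.perform_validity_checks # prunes any remaining undeliverable envelopes
```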
data/test/persistence_test.rb
CHANGED

@@ -1,6 +1,7 @@
 # frozen_string_literal: true
 require_relative 'test_helper'
 require 'tmpdir'
+require 'ostruct'
 
 module Dynflow
   module PersistenceTest
@@ -371,6 +372,41 @@ module Dynflow
        assert_equal [], adapter.pull_envelopes(executor_world_id)
      end

+      it 'supports pruning of envelopes of invalidated worlds' do
+        client_world_id = '5678'
+        executor_world_id = '1234'
+        envelope_hash = ->(envelope) { Dynflow::Utils.indifferent_hash(Dynflow.serializer.dump(envelope)) }
+        executor_envelope = envelope_hash.call(Dispatcher::Envelope['123', client_world_id, executor_world_id, Dispatcher::Execution['111']])
+        client_envelope = envelope_hash.call(Dispatcher::Envelope['123', executor_world_id, client_world_id, Dispatcher::Accepted])
+        envelopes = [client_envelope, executor_envelope]
+
+        envelopes.each { |e| adapter.push_envelope(e) }
+
+        assert_equal 1, adapter.prune_envelopes([executor_world_id])
+        assert_equal 0, adapter.prune_envelopes([executor_world_id])
+        assert_equal [], adapter.pull_envelopes(executor_world_id)
+        assert_equal [client_envelope], adapter.pull_envelopes(client_world_id)
+      end
+
+      it 'supports pruning of orphaned envelopes' do
+        client_world_id = '5678'
+        executor_world_id = '1234'
+        envelope_hash = ->(envelope) { Dynflow::Utils.indifferent_hash(Dynflow.serializer.dump(envelope)) }
+        executor_envelope = envelope_hash.call(Dispatcher::Envelope['123', client_world_id, executor_world_id, Dispatcher::Execution['111']])
+        client_envelope = envelope_hash.call(Dispatcher::Envelope['123', executor_world_id, client_world_id, Dispatcher::Accepted])
+        envelopes = [client_envelope, executor_envelope]
+
+        envelopes.each { |e| adapter.push_envelope(e) }
+        adapter.insert_coordinator_record({"class"=>"Dynflow::Coordinator::ExecutorWorld",
+                                           "id" => executor_world_id, "meta" => {}, "active" => true })
+
+        assert_equal 1, adapter.prune_undeliverable_envelopes
+        assert_equal 0, adapter.prune_undeliverable_envelopes
+        assert_equal [], adapter.pull_envelopes(client_world_id)
+        assert_equal [executor_envelope], adapter.pull_envelopes(executor_world_id)
+        assert_equal [], adapter.pull_envelopes(executor_world_id)
+      end
+
      it 'supports reading data saved prior to normalization' do
        db = adapter.send(:db)
        # Prepare records for saving
data/test/redis_locking_test.rb
ADDED

@@ -0,0 +1,92 @@
+# frozen_string_literal: true
+require_relative 'test_helper'
+require 'mocha/minitest'
+require 'minitest/stub_const'
+require 'ostruct'
+require 'sidekiq'
+require 'dynflow/executors/sidekiq/core'
+
+module Dynflow
+  module RedisLockingTest
+    describe Executors::Sidekiq::RedisLocking do
+      class Orchestrator
+        include Executors::Sidekiq::RedisLocking
+
+        attr_accessor :logger
+
+        def initialize(world, logger)
+          @world = world
+          @logger = logger
+        end
+      end
+
+      class Logger
+        attr_reader :logs
+        def initialize
+          @logs = []
+        end
+
+        [:info, :error, :fatal].each do |key|
+          define_method key do |message|
+            @logs << [key, message]
+          end
+        end
+      end
+
+      after do
+        ::Sidekiq.redis do |conn|
+          conn.del Executors::Sidekiq::RedisLocking::REDIS_LOCK_KEY
+        end
+      end
+
+      def redis_orchestrator_id
+        ::Sidekiq.redis do |conn|
+          conn.get Executors::Sidekiq::RedisLocking::REDIS_LOCK_KEY
+        end
+      end
+
+      let(:world) { OpenStruct.new(:id => '12345') }
+      let(:world2) { OpenStruct.new(:id => '67890') }
+      let(:orchestrator) { Orchestrator.new(world, Logger.new) }
+      let(:orchestrator2) { Orchestrator.new(world2, Logger.new) }
+
+      it 'acquires the lock when it is not taken' do
+        orchestrator.wait_for_orchestrator_lock
+        logs = orchestrator.logger.logs
+        _(redis_orchestrator_id).must_equal world.id
+        _(logs).must_equal [[:info, 'Acquired orchestrator lock, entering active mode.']]
+      end
+
+      it 'reacquires the lock if it was lost' do
+        orchestrator.reacquire_orchestrator_lock
+        logs = orchestrator.logger.logs
+        _(redis_orchestrator_id).must_equal world.id
+        _(logs).must_equal [[:error, 'The orchestrator lock was lost, reacquired']]
+      end
+
+      it 'terminates the process if lock was stolen' do
+        orchestrator.wait_for_orchestrator_lock
+        Process.expects(:kill)
+        orchestrator2.reacquire_orchestrator_lock
+        logs = orchestrator2.logger.logs
+        _(redis_orchestrator_id).must_equal world.id
+        _(logs).must_equal [[:fatal, 'The orchestrator lock was stolen by 12345, aborting.']]
+      end
+
+      it 'polls for the lock availability' do
+        Executors::Sidekiq::RedisLocking.stub_const(:REDIS_LOCK_TTL, 1) do
+          Executors::Sidekiq::RedisLocking.stub_const(:REDIS_LOCK_POLL_INTERVAL, 0.5) do
+            orchestrator.wait_for_orchestrator_lock
+            _(redis_orchestrator_id).must_equal world.id
+            orchestrator2.wait_for_orchestrator_lock
+          end
+        end
+
+        _(redis_orchestrator_id).must_equal world2.id
+        passive, active = orchestrator2.logger.logs
+        _(passive).must_equal [:info, 'Orchestrator lock already taken, entering passive mode.']
+        _(active).must_equal [:info, 'Acquired orchestrator lock, entering active mode.']
+      end
+    end
+  end
+end
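The tests exercise the `RedisLocking` mixin used by the Sidekiq orchestrator core: only one orchestrator process may hold the Redis lock (stored under `REDIS_LOCK_KEY` with its world id) and act as the active orchestrator; others stay passive and poll, and a lock that is lost is reacquired while a stolen lock aborts the process. The snippet below is a rough sketch of that pattern written against the plain redis-rb API, not dynflow's actual implementation; the key name, TTL, and poll interval are placeholders.

```ruby
# Rough sketch of the single-active-orchestrator pattern the tests exercise.
# This is NOT dynflow's implementation; lock key, TTL, and poll interval are
# illustrative placeholders.
require 'redis'

LOCK_KEY  = 'my-app:orchestrator-lock' # placeholder key name
LOCK_TTL  = 60                         # seconds
POLL_SECS = 15

redis = Redis.new
my_id = 'world-1234'

# Try to become the active orchestrator: SET NX with an expiry wins the lock.
until redis.set(LOCK_KEY, my_id, nx: true, ex: LOCK_TTL)
  sleep POLL_SECS # passive mode: keep polling until the current holder goes away
end

# Periodically refresh the lock; if another id appears, the lock was stolen.
holder = redis.get(LOCK_KEY)
if holder == my_id
  redis.expire(LOCK_KEY, LOCK_TTL)
elsif holder.nil?
  redis.set(LOCK_KEY, my_id, ex: LOCK_TTL) # lock lost, reacquire it
else
  abort "orchestrator lock stolen by #{holder}"
end
```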
metadata
CHANGED

@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: dynflow
 version: !ruby/object:Gem::Version
-  version: 1.4.
+  version: 1.4.5
 platform: ruby
 authors:
 - Ivan Necas
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2020-06-17 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: multi_json
@@ -73,14 +73,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.
+        version: 0.6.0
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
      - !ruby/object:Gem::Version
-        version: 0.
+        version: 0.6.0
 - !ruby/object:Gem::Dependency
   name: sequel
   requirement: !ruby/object:Gem::Requirement
@@ -151,6 +151,20 @@ dependencies:
     - - ">="
       - !ruby/object:Gem::Version
         version: '0'
+- !ruby/object:Gem::Dependency
+  name: minitest-stub-const
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: activerecord
   requirement: !ruby/object:Gem::Requirement
@@ -498,6 +512,7 @@ files:
 - lib/dynflow/persistence_adapters/sequel_migrations/017_add_delayed_plan_frozen.rb
 - lib/dynflow/persistence_adapters/sequel_migrations/018_add_uuid_column.rb
 - lib/dynflow/persistence_adapters/sequel_migrations/019_update_mysql_time_precision.rb
+- lib/dynflow/persistence_adapters/sequel_migrations/020_drop_duplicate_indices.rb
 - lib/dynflow/rails.rb
 - lib/dynflow/rails/configuration.rb
 - lib/dynflow/rails/daemon.rb
@@ -566,6 +581,7 @@ files:
 - test/middleware_test.rb
 - test/persistence_test.rb
 - test/prepare_travis_env.sh
+- test/redis_locking_test.rb
 - test/rescue_test.rb
 - test/round_robin_test.rb
 - test/semaphores_test.rb
@@ -648,6 +664,7 @@ test_files:
 - test/middleware_test.rb
 - test/persistence_test.rb
 - test/prepare_travis_env.sh
+- test/redis_locking_test.rb
 - test/rescue_test.rb
 - test/round_robin_test.rb
 - test/semaphores_test.rb