dynflow 1.9.3 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/bats.yml +50 -0
  3. data/.github/workflows/release.yml +1 -1
  4. data/.github/workflows/ruby.yml +27 -46
  5. data/.gitignore +2 -0
  6. data/.rubocop.yml +3 -0
  7. data/Dockerfile +1 -1
  8. data/Gemfile +7 -8
  9. data/README.md +4 -4
  10. data/doc/pages/source/documentation/index.md +4 -4
  11. data/dynflow.gemspec +2 -3
  12. data/examples/example_helper.rb +1 -1
  13. data/examples/execution_plan_chaining.rb +56 -0
  14. data/examples/remote_executor.rb +5 -8
  15. data/lib/dynflow/action/format.rb +4 -33
  16. data/lib/dynflow/debug/telemetry/persistence.rb +1 -1
  17. data/lib/dynflow/delayed_executors/abstract_core.rb +1 -1
  18. data/lib/dynflow/delayed_plan.rb +6 -0
  19. data/lib/dynflow/director.rb +9 -1
  20. data/lib/dynflow/executors/sidekiq/core.rb +1 -1
  21. data/lib/dynflow/executors/sidekiq/redis_locking.rb +10 -3
  22. data/lib/dynflow/extensions/msgpack.rb +4 -0
  23. data/lib/dynflow/persistence.rb +14 -2
  24. data/lib/dynflow/persistence_adapters/abstract.rb +9 -1
  25. data/lib/dynflow/persistence_adapters/sequel.rb +91 -48
  26. data/lib/dynflow/persistence_adapters/sequel_migrations/025_create_execution_plan_dependencies.rb +22 -0
  27. data/lib/dynflow/rails/daemon.rb +16 -7
  28. data/lib/dynflow/testing.rb +1 -1
  29. data/lib/dynflow/version.rb +1 -1
  30. data/lib/dynflow/world.rb +34 -13
  31. data/lib/dynflow.rb +0 -1
  32. data/test/action_test.rb +3 -3
  33. data/test/bats/helpers/common.bash +67 -0
  34. data/test/bats/helpers/containers.bash +146 -0
  35. data/test/bats/setup_suite.bash +46 -0
  36. data/test/bats/sidekiq-orchestrator.bats +178 -0
  37. data/test/bats/teardown_suite.bash +16 -0
  38. data/test/concurrency_control_test.rb +0 -1
  39. data/test/daemon_test.rb +21 -2
  40. data/test/extensions_test.rb +3 -3
  41. data/test/future_execution_test.rb +150 -3
  42. data/test/persistence_test.rb +70 -3
  43. data/test/support/dummy_example.rb +4 -0
  44. data/test/test_helper.rb +19 -4
  45. data/web/views/show.erb +24 -0
  46. metadata +15 -17
  47. data/.github/install_dependencies.sh +0 -35
data/test/bats/helpers/containers.bash ADDED
@@ -0,0 +1,146 @@
+ #!/usr/bin/env bash
+ # Container helper functions for bats tests
+
+ # Default container names
+ POSTGRES_CONTAINER_NAME="${POSTGRES_CONTAINER_NAME:-dynflow-test-postgres}"
+ REDIS_CONTAINER_NAME="${REDIS_CONTAINER_NAME:-dynflow-test-redis}"
+
+ # Default ports
+ POSTGRES_PORT="${POSTGRES_PORT:-15432}"
+ REDIS_PORT="${REDIS_PORT:-16379}"
+
+ # Database credentials
+ POSTGRES_USER="${POSTGRES_USER:-dynflow_test}"
+ POSTGRES_PASSWORD="${POSTGRES_PASSWORD:-dynflow_test_pass}"
+ POSTGRES_DB="${POSTGRES_DB:-dynflow_test}"
+
+ # Container images
+ POSTGRES_IMAGE="${POSTGRES_IMAGE:-docker.io/library/postgres:15}"
+ REDIS_IMAGE="${REDIS_IMAGE:-docker.io/library/redis:7-alpine}"
+
+ # Start PostgreSQL container
+ start_postgres() {
+   echo "Starting PostgreSQL container: ${POSTGRES_CONTAINER_NAME}" >&2
+
+   podman run -d \
+     --name "${POSTGRES_CONTAINER_NAME}" \
+     -e POSTGRES_USER="${POSTGRES_USER}" \
+     -e POSTGRES_PASSWORD="${POSTGRES_PASSWORD}" \
+     -e POSTGRES_DB="${POSTGRES_DB}" \
+     -p "${POSTGRES_PORT}:5432" \
+     "${POSTGRES_IMAGE}" \
+     postgres -c fsync=off -c synchronous_commit=off -c full_page_writes=off
+
+   # Wait for PostgreSQL to be ready
+   echo "Waiting for PostgreSQL to be ready..." >&2
+   local max_attempts=30
+   local attempt=0
+
+   while [ $attempt -lt $max_attempts ]; do
+     if podman exec "${POSTGRES_CONTAINER_NAME}" pg_isready -U "${POSTGRES_USER}" > /dev/null 2>&1; then
+       echo "PostgreSQL is ready" >&2
+       return 0
+     fi
+     attempt=$((attempt + 1))
+     sleep 1
+   done
+
+   echo "ERROR: PostgreSQL failed to start within ${max_attempts} seconds" >&2
+   return 1
+ }
+
+ stop_container() {
+   local container="$1"
+   local with_volumes="$2"
+
+   echo "Stopping container: ${container}" >&2
+   if podman ps -a --format "{{.Names}}" | grep -q "^${container}$"; then
+     podman stop -t 2 "${container}" > /dev/null 2>&1 || true
+     if [ "$with_volumes" = "1" ]; then
+       podman rm -v -f "${container}" > /dev/null 2>&1 || true
+     else
+       podman rm -f "${container}" > /dev/null 2>&1 || true
+     fi
+   fi
+ }
+
+ # Stop PostgreSQL container
+ stop_postgres() {
+   stop_container "$POSTGRES_CONTAINER_NAME" "$1"
+ }
+
+ # Start Redis container
+ start_redis() {
+   echo "Starting Redis container: ${REDIS_CONTAINER_NAME}" >&2
+
+   podman run -d \
+     --name "${REDIS_CONTAINER_NAME}" \
+     -p "${REDIS_PORT}:6379" \
+     "${REDIS_IMAGE}"
+
+   # Wait for Redis to be ready
+   echo "Waiting for Redis to be ready..." >&2
+   local max_attempts=30
+   local attempt=0
+
+   while [ $attempt -lt $max_attempts ]; do
+     if podman exec "${REDIS_CONTAINER_NAME}" redis-cli ping > /dev/null 2>&1; then
+       echo "Redis is ready" >&2
+       return 0
+     fi
+     attempt=$((attempt + 1))
+     sleep 1
+   done
+
+   echo "ERROR: Redis failed to start within ${max_attempts} seconds" >&2
+   return 1
+ }
+
+ # Stop Redis container
+ stop_redis() {
+   stop_container "$REDIS_CONTAINER_NAME" "$1"
+ }
+
+ # Check if PostgreSQL container is running
+ is_postgres_running() {
+   podman ps --format "{{.Names}}" | grep -q "^${POSTGRES_CONTAINER_NAME}$"
+ }
+
+ # Check if Redis container is running
+ is_redis_running() {
+   podman ps --format "{{.Names}}" | grep -q "^${REDIS_CONTAINER_NAME}$"
+ }
+
+ # Get PostgreSQL connection string
+ get_postgres_url() {
+   echo "postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@localhost:${POSTGRES_PORT}/${POSTGRES_DB}"
+ }
+
+ # Get Redis URL
+ get_redis_url() {
+   echo "redis://localhost:${REDIS_PORT}/0"
+ }
+
+ # Execute SQL in PostgreSQL container
+ exec_sql() {
+   local sql="$1"
+   podman exec "${POSTGRES_CONTAINER_NAME}" \
+     psql -U "${POSTGRES_USER}" -d "${POSTGRES_DB}" -c "${sql}"
+ }
+
+ # Execute Redis command
+ exec_redis() {
+   podman exec "${REDIS_CONTAINER_NAME}" redis-cli "$@"
+ }
+
+ # Clean up all test containers
+ cleanup_containers() {
+   stop_postgres 1
+   stop_redis 1
+ }
+
+ # Start all test containers
+ start_containers() {
+   start_postgres || return 1
+   start_redis || return 1
+ }
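A minimal sketch of driving these helpers by hand, outside bats, assuming podman is installed and the file is sourced from the project root (the DB_CONN_STRING variable name is an assumption; this snippet is illustrative and not part of the diff):

#!/usr/bin/env bash
# Illustrative only: exercise the helper functions defined above.
source test/bats/helpers/containers.bash

start_containers || exit 1                    # PostgreSQL on :15432 and Redis on :16379 via podman
export DB_CONN_STRING="$(get_postgres_url)"   # assumed variable name; the real wiring lives in helpers/common.bash
export REDIS_URL="$(get_redis_url)"

exec_sql 'SELECT 1;'                          # sanity-check the database
exec_redis ping                               # sanity-check Redis

cleanup_containers                            # stop and remove both containers, including volumes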
data/test/bats/setup_suite.bash ADDED
@@ -0,0 +1,46 @@
+ #!/usr/bin/env bash
+ # Suite-level setup - runs once before all tests
+
+ # Load container helpers
+ source "$(dirname "${BASH_SOURCE[0]}")/helpers/containers.bash"
+ source "$(dirname "${BASH_SOURCE[0]}")/helpers/common.bash"
+
+ # This function runs once before all tests in the suite
+ setup_suite() {
+   echo "=== Setting up bats test suite ===" >&2
+
+   # Verify podman is available
+   if ! command -v podman &> /dev/null; then
+     echo "ERROR: podman is not installed or not in PATH" >&2
+     exit 1
+   fi
+
+   # Check if bundle is available
+   PROJECT_ROOT="$(get_project_root)"
+   if ! command -v bundle &> /dev/null; then
+     echo "WARNING: bundler is not installed" >&2
+   else
+     # Install dependencies if needed
+     echo "Checking bundle dependencies..." >&2
+     cd "${PROJECT_ROOT}" && bundle check > /dev/null 2>&1 || bundle install
+   fi
+
+   # Pull container images if not already present
+   echo "Checking container images..." >&2
+
+   if ! podman image exists "${POSTGRES_IMAGE}"; then
+     echo "Pulling PostgreSQL image: ${POSTGRES_IMAGE}" >&2
+     podman pull "${POSTGRES_IMAGE}"
+   fi
+
+   if ! podman image exists "${REDIS_IMAGE}"; then
+     echo "Pulling Redis image: ${REDIS_IMAGE}" >&2
+     podman pull "${REDIS_IMAGE}"
+   fi
+
+   # Clean up any existing test containers from previous runs
+   echo "Cleaning up any existing test containers..." >&2
+   cleanup_containers
+
+   echo "=== Test suite setup complete ===" >&2
+ }
data/test/bats/sidekiq-orchestrator.bats ADDED
@@ -0,0 +1,178 @@
+ #!/usr/bin/env bats
+ # Example bats test file for Dynflow
+
+ # Load helper functions
+ load helpers/containers
+ load helpers/common
+
+ # Setup runs before each test
+ setup() {
+   # Setup environment variables
+   setup_test_env
+
+   # Ensure containers are running
+   is_postgres_running && stop_postgres
+   start_postgres
+   is_redis_running && stop_redis
+   start_redis
+ }
+
+ # Teardown runs after each test
+ teardown() {
+   (
+     cd "$TEST_PIDDIR" || return 1
+     shopt -s nullglob
+     for pidfile in * ; do
+       kill -9 "$(cat "$pidfile")"
+     done
+   )
+   cleanup_containers 1
+ }
+
+ @test "sanity" {
+   cd "$(get_project_root)"
+
+   run_background 'o1' bundle exec sidekiq -c 1 -r ./examples/remote_executor.rb -q dynflow_orchestrator
+   wait_for 5 1 grep 'dynflow: Acquired orchestrator lock, entering active mode.' "$(bg_output_file o1)"
+
+   run_background 'w1' bundle exec sidekiq -r ./examples/remote_executor.rb -q default
+   wait_for 5 1 grep -P 'class=Dynflow::Executors::Sidekiq::WorkerJobs::DrainMarker.*INFO: done' "$(bg_output_file w1)"
+
+   timeout 10 bundle exec ruby examples/remote_executor.rb client 1
+   wait_for 1 1 grep -P 'dynflow: ExecutionPlan.*running >>.*stopped' "$(bg_output_file o1)"
+ }
+
+ @test "only one orchestrator can be active at a time" {
+   cd "$(get_project_root)"
+
+   run_background 'o1' bundle exec sidekiq -r ./examples/remote_executor.rb -q dynflow_orchestrator -c 1
+   wait_for 30 1 grep 'dynflow: Acquired orchestrator lock, entering active mode.' "$(bg_output_file o1)"
+
+   run_background 'o2' bundle exec sidekiq -r ./examples/remote_executor.rb -q dynflow_orchestrator -c 1
+   wait_for 30 1 grep 'dynflow: Orchestrator lock already taken, entering passive mode.' "$(bg_output_file o2)"
+ }
+
+ @test "multiple orchestrators can be active with multiple redis dbs" {
+   cd "$(get_project_root)"
+
+   run_background 'o1' bundle exec sidekiq -r ./examples/remote_executor.rb -q dynflow_orchestrator -c 1
+   wait_for 30 1 grep 'dynflow: Acquired orchestrator lock, entering active mode.' "$(bg_output_file o1)"
+
+   run_background 'w1' bundle exec sidekiq -r ./examples/remote_executor.rb -q default
+
+   export REDIS_URL=${REDIS_URL%/0}/1
+   run_background 'o2' bundle exec sidekiq -r ./examples/remote_executor.rb -q dynflow_orchestrator -c 1
+   wait_for 30 1 grep 'dynflow: Acquired orchestrator lock, entering active mode.' "$(bg_output_file o2)"
+
+   run_background 'w2' bundle exec sidekiq -r ./examples/remote_executor.rb -q default
+
+   # The client performs a round robin between the available executors
+   # This should lead to each orchestrator handling one execution plan
+   timeout 60 bundle exec ruby examples/remote_executor.rb client 2
+   wait_for 1 1 grep -P 'dynflow: ExecutionPlan.*running >>.*stopped' "$(bg_output_file o1)"
+   wait_for 1 1 grep -P 'dynflow: ExecutionPlan.*running >>.*stopped' "$(bg_output_file o2)"
+ }
+
+ @test "orchestrators do fail over" {
+   cd "$(get_project_root)"
+
+   run_background 'o1' bundle exec sidekiq -r ./examples/remote_executor.rb -q dynflow_orchestrator -c 1
+   wait_for 30 1 grep 'dynflow: Acquired orchestrator lock, entering active mode.' "$(bg_output_file o1)"
+
+   run_background 'o2' bundle exec sidekiq -r ./examples/remote_executor.rb -q dynflow_orchestrator -c 1
+   wait_for 30 1 grep 'dynflow: Orchestrator lock already taken, entering passive mode.' "$(bg_output_file o2)"
+
+   kill -15 "$(cat "$TEST_PIDDIR/o1.pid")"
+   wait_for 120 1 grep 'dynflow: Acquired orchestrator lock, entering active mode.' "$(bg_output_file o2)"
+ }
+
+ @test "active orchestrator exits when pg goes away for good" {
+   cd "$(get_project_root)"
+
+   run_background 'o1' bundle exec sidekiq -r ./examples/remote_executor.rb -q dynflow_orchestrator -c 1
+   wait_for 30 1 grep 'dynflow: Acquired orchestrator lock, entering active mode.' "$(bg_output_file o1)"
+
+   run_background 'w1' bundle exec sidekiq -r ./examples/remote_executor.rb -q default
+   wait_for 5 1 grep 'dynflow: Finished performing validity checks' "$(bg_output_file o1)"
+
+   podman stop "$POSTGRES_CONTAINER_NAME"
+   wait_for 60 1 grep 'dynflow: World terminated, exiting.' "$(bg_output_file o1)"
+ }
+
+ @test "active orchestrator can withstand temporary pg connection drop" {
+   cd "$(get_project_root)"
+
+   run_background 'o1' bundle exec sidekiq -r ./examples/remote_executor.rb -q dynflow_orchestrator -c 1
+   wait_for 30 1 grep 'dynflow: Acquired orchestrator lock, entering active mode.' "$(bg_output_file o1)"
+
+   run_background 'w1' bundle exec sidekiq -r ./examples/remote_executor.rb -q default
+   wait_for 5 1 grep 'dynflow: Finished performing validity checks' "$(bg_output_file o1)"
+
+   podman stop "$POSTGRES_CONTAINER_NAME"
+   wait_for 30 1 grep 'dynflow: Persistence retry no. 1' "$(bg_output_file o1)"
+   podman start "$POSTGRES_CONTAINER_NAME"
+   wait_for 30 1 grep 'dynflow: Executor heartbeat' "$(bg_output_file o1)"
+
+   timeout 30 bundle exec ruby examples/remote_executor.rb client 1
+   wait_for 1 1 grep -P 'dynflow: ExecutionPlan.*running >>.*stopped' "$(bg_output_file o1)"
+ }
+
+ @test "active orchestrator can survive a brief redis connection drop" {
+   cd "$(get_project_root)"
+
+   run_background 'o1' bundle exec sidekiq -r ./examples/remote_executor.rb -q dynflow_orchestrator -c 1
+   wait_for 30 1 grep 'dynflow: Acquired orchestrator lock, entering active mode.' "$(bg_output_file o1)"
+
+   run_background 'w1' bundle exec sidekiq -r ./examples/remote_executor.rb -q default
+   wait_for 5 1 grep 'dynflow: Finished performing validity checks' "$(bg_output_file o1)"
+
+   stop_redis
+   wait_for 30 1 grep 'Error connecting to Redis' "$(bg_output_file o1)"
+   start_redis
+
+   timeout 10 bundle exec ruby examples/remote_executor.rb client 1
+   wait_for 1 1 grep -P 'dynflow: ExecutionPlan.*running >>.*stopped' "$(bg_output_file o1)"
+ }
+
+ @test "active orchestrator can survive a longer redis connection drop" {
+   cd "$(get_project_root)"
+
+   run_background 'o1' bundle exec sidekiq -r ./examples/remote_executor.rb -q dynflow_orchestrator -c 1
+   wait_for 30 1 grep 'dynflow: Acquired orchestrator lock, entering active mode.' "$(bg_output_file o1)"
+
+   run_background 'w1' bundle exec sidekiq -r ./examples/remote_executor.rb -q default
+   wait_for 5 1 grep 'dynflow: Finished performing validity checks' "$(bg_output_file o1)"
+
+   stop_redis 1
+   wait_for 30 1 grep 'Error connecting to Redis' "$(bg_output_file o1)"
+   start_redis
+
+   wait_for 30 1 grep 'The orchestrator lock was lost, reacquired' "$(bg_output_file o1)"
+
+   timeout 10 bundle exec ruby examples/remote_executor.rb client 1
+   wait_for 1 1 grep -P 'dynflow: ExecutionPlan.*running >>.*stopped' "$(bg_output_file o1)"
+ }
+
+ @test "orchestrators can fail over if active one goes away during downtime" {
+   cd "$(get_project_root)"
+
+   run_background 'o1' bundle exec sidekiq -r ./examples/remote_executor.rb -q dynflow_orchestrator -c 1
+   wait_for 30 1 grep 'dynflow: Acquired orchestrator lock, entering active mode.' "$(bg_output_file o1)"
+
+   run_background 'o2' bundle exec sidekiq -r ./examples/remote_executor.rb -q dynflow_orchestrator -c 1
+   wait_for 30 1 grep 'dynflow: Orchestrator lock already taken, entering passive mode.' "$(bg_output_file o2)"
+
+   run_background 'w1' bundle exec sidekiq -r ./examples/remote_executor.rb -q default
+   wait_for 5 1 grep 'dynflow: Finished performing validity checks' "$(bg_output_file o1)"
+
+   stop_redis 1
+   wait_for 30 1 grep 'Error connecting to Redis' "$(bg_output_file o1)"
+   kill -15 "$(cat "$TEST_PIDDIR/o1.pid")"
+   start_redis
+
+   wait_for 120 1 grep 'dynflow: Acquired orchestrator lock, entering active mode.' "$(bg_output_file o2)"
+   wait_for 120 1 grep 'dynflow: Finished performing validity checks' "$(bg_output_file o2)"
+
+   timeout 10 bundle exec ruby examples/remote_executor.rb client 1
+   wait_for 1 1 grep -P 'dynflow: ExecutionPlan.*running >>.*stopped' "$(bg_output_file o2)"
+ }
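The suite above is meant to be driven by bats-core (the new data/.github/workflows/bats.yml wires it into CI, but its contents are not shown in this excerpt). A rough local invocation, assuming podman and a bats-core release with setup_suite/teardown_suite support are installed, might look like the following sketch; it is not a command taken from the diff:

# Illustrative only: run the orchestrator suite from the project root
# (the data/ prefix in the paths above is how the gem packages its files).
bundle install
bats test/bats/sidekiq-orchestrator.bats   # bats-core picks up setup_suite.bash / teardown_suite.bash automatically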
data/test/bats/teardown_suite.bash ADDED
@@ -0,0 +1,16 @@
+ #!/usr/bin/env bash
+ # Suite-level teardown - runs once after all tests
+
+ # Load container helpers
+ source "$(dirname "${BASH_SOURCE[0]}")/helpers/containers.bash"
+
+ # This function runs once after all tests in the suite
+ teardown_suite() {
+   echo "=== Tearing down bats test suite ===" >&2
+
+   # Clean up all test containers
+   echo "Cleaning up test containers..." >&2
+   cleanup_containers
+
+   echo "=== Test suite teardown complete ===" >&2
+ }
data/test/concurrency_control_test.rb CHANGED
@@ -54,7 +54,6 @@ module Dynflow
      def run(event = nil)
        unless output[:slept]
          output[:slept] = true
-         puts "SLEEPING" if input[:should_sleep]
          suspend { |suspended| world.clock.ping(suspended, 100, [:run]) } if input[:should_sleep]
        end
      end
data/test/daemon_test.rb CHANGED
@@ -5,8 +5,24 @@ require 'active_support'
  require 'mocha/minitest'
  require 'logging'
  require 'dynflow/testing'
+ require 'ostruct'
  require_relative '../lib/dynflow/rails'
 
+ class StdIOWrapper
+   def initialize(logger, error = false)
+     @logger = logger
+     @error = error
+   end
+
+   def puts(msg = nil)
+     if @error
+       @logger.error(msg)
+     else
+       @logger.info(msg)
+     end
+   end
+ end
+
  class DaemonTest < ActiveSupport::TestCase
    setup do
      @dynflow_memory_watcher = mock('memory_watcher')
@@ -15,6 +31,9 @@ class DaemonTest < ActiveSupport::TestCase
        @dynflow_memory_watcher,
        @daemons
      )
+     logger = WorldFactory.logger_adapter.logger
+     @daemon.stubs(:stdout).returns(StdIOWrapper.new(logger, false))
+     @daemon.stubs(:stderr).returns(StdIOWrapper.new(logger, true))
      @world_class = mock('dummy world factory')
      @dummy_world = ::Dynflow::Testing::DummyWorld.new
      @dummy_world.stubs(:id => '123')
@@ -27,9 +46,9 @@ class DaemonTest < ActiveSupport::TestCase
        @world_class,
        ::Dynflow::Rails::Configuration.new
      )
-     ::Rails.stubs(:application).returns(OpenStruct.new(:dynflow => @dynflow))
+     ::Rails.stubs(:application).returns(::OpenStruct.new(:dynflow => @dynflow))
      ::Rails.stubs(:root).returns('support/rails')
-     ::Rails.stubs(:logger).returns(Logging.logger(STDOUT))
+     ::Rails.stubs(:logger).returns(logger)
      @dynflow.require!
      @dynflow.config.stubs(:increase_db_pool_size? => false)
      @daemon.stubs(:sleep).returns(true) # don't pause the execution
data/test/extensions_test.rb CHANGED
@@ -1,15 +1,15 @@
  # frozen_string_literal: true
 
  require_relative 'test_helper'
- require 'active_support/time'
+ require 'active_support/all'
 
  module Dynflow
    module ExtensionsTest
      describe 'msgpack extensions' do
        before do
-         Thread.current[:time_zone] = ActiveSupport::TimeZone['Europe/Prague']
+         Time.zone = ActiveSupport::TimeZone['Europe/Prague']
        end
-       after { Thread.current[:time_zone] = nil }
+       after { Time.zone = nil }
 
        it 'allows {de,}serializing Time' do
          time = Time.now
data/test/future_execution_test.rb CHANGED
@@ -1,6 +1,7 @@
  # frozen_string_literal: true
 
  require_relative 'test_helper'
+ require 'multi_json'
 
  module Dynflow
    module FutureExecutionTest
@@ -75,7 +76,7 @@ module Dynflow
        it 'finds delayed plans' do
          @start_at = Time.now.utc - 100
          delayed_plan
-         past_delayed_plans = world.persistence.find_past_delayed_plans(@start_at + 10)
+         past_delayed_plans = world.persistence.find_ready_delayed_plans(@start_at + 10)
          _(past_delayed_plans.length).must_equal 1
          _(past_delayed_plans.first.execution_plan_uuid).must_equal execution_plan.id
        end
@@ -112,8 +113,8 @@ module Dynflow
 
        it 'checks for delayed plans in regular intervals' do
          start_time = klok.current_time
-         persistence.expect(:find_past_delayed_plans, [], [start_time])
-         persistence.expect(:find_past_delayed_plans, [], [start_time + options[:poll_interval]])
+         persistence.expect(:find_ready_delayed_plans, [], [start_time])
+         persistence.expect(:find_ready_delayed_plans, [], [start_time + options[:poll_interval]])
          dummy_world.stub :persistence, persistence do
            _(klok.pending_pings.length).must_equal 0
            delayed_executor.start.wait
@@ -189,6 +190,152 @@ module Dynflow
          _(serializer.args).must_equal args
        end
      end
+
+     describe 'execution plan chaining' do
+       let(:world) do
+         WorldFactory.create_world { |config| config.auto_rescue = true }
+       end
+
+       before do
+         @preexisting = world.persistence.find_ready_delayed_plans(Time.now).map(&:execution_plan_uuid)
+       end
+
+       it 'chains two execution plans' do
+         plan1 = world.plan(Support::DummyExample::Dummy)
+         plan2 = world.chain(plan1.id, Support::DummyExample::Dummy)
+
+         Concurrent::Promises.resolvable_future.tap do |promise|
+           world.execute(plan1.id, promise)
+         end.wait
+
+         plan1 = world.persistence.load_execution_plan(plan1.id)
+         _(plan1.state).must_equal :stopped
+         ready = world.persistence.find_ready_delayed_plans(Time.now).reject { |p| @preexisting.include? p.execution_plan_uuid }
+         _(ready.count).must_equal 1
+         _(ready.first.execution_plan_uuid).must_equal plan2.execution_plan_id
+       end
+
+       it 'chains onto multiple execution plans and waits for all to finish' do
+         plan1 = world.plan(Support::DummyExample::Dummy)
+         plan2 = world.plan(Support::DummyExample::Dummy)
+         plan3 = world.chain([plan2.id, plan1.id], Support::DummyExample::Dummy)
+
+         # Execute and complete plan1
+         Concurrent::Promises.resolvable_future.tap do |promise|
+           world.execute(plan1.id, promise)
+         end.wait
+
+         plan1 = world.persistence.load_execution_plan(plan1.id)
+         _(plan1.state).must_equal :stopped
+
+         # plan3 should still not be ready because plan2 hasn't finished yet
+         ready = world.persistence.find_ready_delayed_plans(Time.now).reject { |p| @preexisting.include? p.execution_plan_uuid }
+         _(ready.count).must_equal 0
+
+         # Execute and complete plan2
+         Concurrent::Promises.resolvable_future.tap do |promise|
+           world.execute(plan2.id, promise)
+         end.wait
+
+         plan2 = world.persistence.load_execution_plan(plan2.id)
+         _(plan2.state).must_equal :stopped
+
+         # Now plan3 should be ready since both plan1 and plan2 are complete
+         ready = world.persistence.find_ready_delayed_plans(Time.now).reject { |p| @preexisting.include? p.execution_plan_uuid }
+         _(ready.count).must_equal 1
+         _(ready.first.execution_plan_uuid).must_equal plan3.execution_plan_id
+       end
+
+       it 'cancels the chained plan if the prerequisite fails' do
+         plan1 = world.plan(Support::DummyExample::FailingDummy)
+         plan2 = world.chain(plan1.id, Support::DummyExample::Dummy)
+
+         Concurrent::Promises.resolvable_future.tap do |promise|
+           world.execute(plan1.id, promise)
+         end.wait
+
+         plan1 = world.persistence.load_execution_plan(plan1.id)
+         _(plan1.state).must_equal :stopped
+         _(plan1.result).must_equal :error
+
+         # plan2 will appear in ready delayed plans
+         ready = world.persistence.find_ready_delayed_plans(Time.now).reject { |p| @preexisting.include? p.execution_plan_uuid }
+         _(ready.map(&:execution_plan_uuid)).must_equal [plan2.execution_plan_id]
+
+         # Process the delayed plan through the director
+         work_item = Dynflow::Director::PlanningWorkItem.new(plan2.execution_plan_id, :default, world.id)
+         work_item.world = world
+         work_item.execute
+
+         # Now plan2 should be stopped with error due to failed dependency
+         plan2 = world.persistence.load_execution_plan(plan2.execution_plan_id)
+         _(plan2.state).must_equal :stopped
+         _(plan2.result).must_equal :error
+         _(plan2.errors.first.message).must_match(/prerequisite execution plans failed/)
+         _(plan2.errors.first.message).must_match(/#{plan1.id}/)
+       end
+
+       it 'cancels the chained plan if at least one prerequisite fails' do
+         plan1 = world.plan(Support::DummyExample::Dummy)
+         plan2 = world.plan(Support::DummyExample::FailingDummy)
+         plan3 = world.chain([plan1.id, plan2.id], Support::DummyExample::Dummy)
+
+         # Execute and complete plan1 successfully
+         Concurrent::Promises.resolvable_future.tap do |promise|
+           world.execute(plan1.id, promise)
+         end.wait
+
+         plan1 = world.persistence.load_execution_plan(plan1.id)
+         _(plan1.state).must_equal :stopped
+         _(plan1.result).must_equal :success
+
+         # plan3 should still not be ready because plan2 hasn't finished yet
+         ready = world.persistence.find_ready_delayed_plans(Time.now).reject { |p| @preexisting.include? p.execution_plan_uuid }
+         _(ready).must_equal []
+
+         # Execute and complete plan2 with failure
+         Concurrent::Promises.resolvable_future.tap do |promise|
+           world.execute(plan2.id, promise)
+         end.wait
+
+         plan2 = world.persistence.load_execution_plan(plan2.id)
+         _(plan2.state).must_equal :stopped
+         _(plan2.result).must_equal :error
+
+         # plan3 will now appear in ready delayed plans even though one prerequisite failed
+         ready = world.persistence.find_ready_delayed_plans(Time.now).reject { |p| @preexisting.include? p.execution_plan_uuid }
+         _(ready.map(&:execution_plan_uuid)).must_equal [plan3.execution_plan_id]
+
+         # Process the delayed plan through the director
+         work_item = Dynflow::Director::PlanningWorkItem.new(plan3.execution_plan_id, :default, world.id)
+         work_item.world = world
+         work_item.execute
+
+         # Now plan3 should be stopped with error due to failed dependency
+         plan3 = world.persistence.load_execution_plan(plan3.execution_plan_id)
+         _(plan3.state).must_equal :stopped
+         _(plan3.result).must_equal :error
+         _(plan3.errors.first.message).must_match(/prerequisite execution plans failed/)
+         _(plan3.errors.first.message).must_match(/#{plan2.id}/)
+       end
+
+       it 'chains runs the chained plan if the prerequisite was halted' do
+         plan1 = world.plan(Support::DummyExample::Dummy)
+         plan2 = world.chain(plan1.id, Support::DummyExample::Dummy)
+
+         world.halt(plan1.id)
+         Concurrent::Promises.resolvable_future.tap do |promise|
+           world.execute(plan1.id, promise)
+         end.wait
+
+         plan1 = world.persistence.load_execution_plan(plan1.id)
+         _(plan1.state).must_equal :stopped
+         _(plan1.result).must_equal :pending
+         ready = world.persistence.find_ready_delayed_plans(Time.now).reject { |p| @preexisting.include? p.execution_plan_uuid }
+         _(ready.count).must_equal 1
+         _(ready.first.execution_plan_uuid).must_equal plan2.execution_plan_id
+       end
+     end
    end
  end
 end
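The chaining API exercised by these tests (world.chain, introduced alongside data/examples/execution_plan_chaining.rb and the find_past_delayed_plans → find_ready_delayed_plans rename) can be summarised with a small Ruby sketch; the action class and the already-configured world below are placeholders, not code taken from the gem:

# Illustrative sketch only; assumes `world` is an already configured Dynflow::World
# and MyAction is some Dynflow::Action subclass.
first  = world.plan(MyAction)
# `chain` registers the second plan as a delayed plan that stays dormant until the
# prerequisite finishes; an array of prerequisite plan ids is also accepted.
second = world.chain(first.id, MyAction)

# Run the prerequisite to completion, mirroring the style used in the tests above.
Concurrent::Promises.resolvable_future
  .tap { |done| world.execute(first.id, done) }
  .wait

# Once the prerequisite is finished, the chained plan shows up as "ready" and is
# picked up by the delayed executor (or a PlanningWorkItem, as in the tests).
world.persistence.find_ready_delayed_plans(Time.now).map(&:execution_plan_uuid)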