dynflow 1.4.8 → 1.6.2
- checksums.yaml +4 -4
- data/{test/prepare_travis_env.sh → .github/install_dependencies.sh} +2 -2
- data/.github/workflows/ruby.yml +116 -0
- data/dynflow.gemspec +1 -0
- data/examples/chunked_output_benchmark.rb +77 -0
- data/extras/expand/main.go +180 -0
- data/lib/dynflow/action/suspended.rb +4 -4
- data/lib/dynflow/action/timeouts.rb +2 -2
- data/lib/dynflow/action.rb +15 -4
- data/lib/dynflow/clock.rb +2 -2
- data/lib/dynflow/delayed_executors/abstract_core.rb +11 -9
- data/lib/dynflow/director.rb +42 -5
- data/lib/dynflow/dispatcher/client_dispatcher.rb +8 -2
- data/lib/dynflow/dispatcher/executor_dispatcher.rb +12 -2
- data/lib/dynflow/dispatcher.rb +7 -2
- data/lib/dynflow/execution_history.rb +1 -1
- data/lib/dynflow/execution_plan/hooks.rb +1 -1
- data/lib/dynflow/execution_plan/steps/abstract_flow_step.rb +1 -0
- data/lib/dynflow/execution_plan.rb +16 -5
- data/lib/dynflow/executors/abstract/core.rb +10 -1
- data/lib/dynflow/executors/parallel.rb +6 -2
- data/lib/dynflow/extensions/msgpack.rb +41 -0
- data/lib/dynflow/extensions.rb +6 -0
- data/lib/dynflow/flows/abstract.rb +14 -0
- data/lib/dynflow/flows/abstract_composed.rb +2 -7
- data/lib/dynflow/flows/atom.rb +2 -2
- data/lib/dynflow/flows/concurrence.rb +2 -0
- data/lib/dynflow/flows/registry.rb +32 -0
- data/lib/dynflow/flows/sequence.rb +2 -0
- data/lib/dynflow/flows.rb +1 -0
- data/lib/dynflow/persistence.rb +10 -0
- data/lib/dynflow/persistence_adapters/sequel.rb +51 -16
- data/lib/dynflow/persistence_adapters/sequel_migrations/021_create_output_chunks.rb +30 -0
- data/lib/dynflow/persistence_adapters/sequel_migrations/022_store_flows_as_msgpack.rb +90 -0
- data/lib/dynflow/persistence_adapters/sequel_migrations/023_sqlite_workarounds.rb +19 -0
- data/lib/dynflow/serializable.rb +2 -2
- data/lib/dynflow/testing/dummy_coordinator.rb +10 -0
- data/lib/dynflow/testing/dummy_planned_action.rb +4 -0
- data/lib/dynflow/testing/dummy_world.rb +2 -1
- data/lib/dynflow/testing/in_thread_executor.rb +2 -2
- data/lib/dynflow/testing/in_thread_world.rb +5 -5
- data/lib/dynflow/testing.rb +1 -0
- data/lib/dynflow/version.rb +1 -1
- data/lib/dynflow/world.rb +16 -4
- data/lib/dynflow.rb +2 -1
- data/test/dispatcher_test.rb +6 -0
- data/test/execution_plan_hooks_test.rb +36 -0
- data/test/extensions_test.rb +42 -0
- data/test/flows_test.rb +44 -0
- data/test/future_execution_test.rb +6 -3
- data/test/persistence_test.rb +2 -2
- data/web/views/flow_step.erb +1 -0
- metadata +37 -5
- data/.travis.yml +0 -33
checksums.yaml
CHANGED

```diff
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f0e5c6a8141eaea583bebabd135cdbfcff6c2516410db7930595e3868383f331
+  data.tar.gz: bc72a36dd5284f5cea95651de2620f0d41db5f315195a4ed573155c94b510bb9
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c9b0efe531cf9d3c45432bfc94542f47ff6a72fe20bd96a4541242f58a13abc77603a62fcb5294e481feb9a28c49c6eebd2a9747092da6fe7a5e67ad1dc8e9d3
+  data.tar.gz: d9c66e79fad07d6e6bfb9b8e265af0062adf8ba2b0d8d126e46080ca890200db07346f7c4702518a9c4d4ace4ae94d668ce20761166c9e81a7ef5531cdc85240
```
data/{test/prepare_travis_env.sh → .github/install_dependencies.sh}
RENAMED

```diff
@@ -1,5 +1,7 @@
 #!/usr/bin/env bash
 
+set -x
+
 echo "Setting the environment to use ${DB} database"
 
 BUNDLE_CONFIG=.bundle/config
@@ -12,11 +14,9 @@ EOF
 case $DB in
     mysql)
         sed -i 's/:mysql//'g $BUNDLE_CONFIG
-        mysql -e 'create database travis_ci_test;'
        ;;
     postgresql)
         sed -i 's/:postgresql//'g $BUNDLE_CONFIG
-        psql -c 'create database travis_ci_test;' -U postgres
        ;;
     sqlite3)
        # the tests are by default using sqlite3: do nothing
```
data/.github/workflows/ruby.yml
ADDED

```diff
@@ -0,0 +1,116 @@
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third-party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+# This workflow will download a prebuilt Ruby version, install dependencies and run tests with Rake
+# For more information see: https://github.com/marketplace/actions/setup-ruby-jruby-and-truffleruby
+
+name: Ruby
+
+on: [pull_request]
+
+env:
+  TESTOPTS: --verbose
+
+jobs:
+  rubocop:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Setup Ruby
+        uses: ruby/setup-ruby@v1
+        with:
+          ruby-version: 2.7
+      - name: Setup
+        run: |
+          gem install bundler
+          bundle install --jobs=3 --retry=3
+      - name: Run rubocop
+        run: bundle exec rubocop
+
+  test:
+    runs-on: ubuntu-latest
+    needs: rubocop
+    strategy:
+      fail-fast: false
+      matrix:
+        ruby_version:
+          - 2.5.0
+          - 2.6.0
+          - 2.7.0
+          - 3.0.0
+        concurrent_ruby_ext:
+          - 'true'
+          - 'false'
+        db:
+          - postgresql
+          - mysql
+          - sqlite3
+        include:
+          - db: postgresql
+            conn_string: postgres://postgres@localhost/travis_ci_test
+          - db: mysql
+            conn_string: mysql2://root@127.0.0.1/travis_ci_test
+          - db: sqlite3
+            conn_string: sqlite:/
+        exclude:
+          - db: mysql
+            ruby_version: 2.5.0
+          - db: mysql
+            ruby_version: 2.6.0
+          - db: mysql
+            ruby_version: 3.0.0
+          - db: mysql
+            concurrent_ruby_ext: 'true'
+          - db: sqlite3
+            ruby_version: 2.5.0
+          - db: sqlite3
+            ruby_version: 2.6.0
+          - db: sqlite3
+            ruby_version: 3.0.0
+          - db: sqlite3
+            concurrent_ruby_ext: 'true'
+          - db: postgresql
+            ruby_version: 2.5.0
+            concurrent_ruby_ext: 'true'
+          - db: postgresql
+            ruby_version: 2.6.0
+            concurrent_ruby_ext: 'true'
+          - db: postgresql
+            ruby_version: 3.0.0
+            concurrent_ruby_ext: 'true'
+
+    services:
+      postgres:
+        image: postgres:12.1
+        ports: ['5432:5432']
+        options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5
+        env:
+          POSTGRES_DB: travis_ci_test
+      mariadb:
+        image: mariadb:10
+        ports: ['3306:3306']
+        env:
+          MYSQL_ALLOW_EMPTY_PASSWORD: 'yes'
+          MYSQL_DATABASE: travis_ci_test
+      redis:
+        image: redis:latest
+        ports: ['6379:6379']
+
+    env:
+      DB: ${{ matrix.db }}
+      DB_CONN_STRING: ${{ matrix.conn_string }}
+      CONCURRENT_RUBY_EXT: "${{ matrix.concurrent_ruby_ext }}"
+
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Ruby
+        # To automatically get bug fixes and new Ruby versions for ruby/setup-ruby,
+        # change this to (see https://github.com/ruby/setup-ruby#versioning):
+        uses: ruby/setup-ruby@v1
+        with:
+          ruby-version: ${{ matrix.ruby_version }}
+      - name: Install dependencies
+        run: .github/install_dependencies.sh
+      - name: Run tests
+        run: bundle exec rake test
```
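Worth noting: the `exclude` list trims the matrix so that mysql and sqlite3 are exercised only on Ruby 2.7.0 without the native concurrent-ruby extension, while postgresql runs across all four Ruby versions (plus the native-extension variant on 2.7.0).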
data/dynflow.gemspec
CHANGED

```diff
@@ -20,6 +20,7 @@ Gem::Specification.new do |s|
   s.required_ruby_version = '>= 2.3.0'
 
   s.add_dependency "multi_json"
+  s.add_dependency "msgpack", '~> 1.3', '>= 1.3.3'
   s.add_dependency "apipie-params"
   s.add_dependency "algebrick", '~> 0.7.0'
   s.add_dependency "concurrent-ruby", '~> 1.1.3'
```
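The new msgpack dependency backs the serialization changes elsewhere in this release (`lib/dynflow/extensions/msgpack.rb` and the `022_store_flows_as_msgpack` migration). A minimal sketch of the round-trip involved, using the msgpack gem's public API; the data here is illustrative, and types such as `Time` additionally need the extension types that `extensions/msgpack.rb` registers:

```ruby
require 'msgpack'

# JSON-like data survives a pack/unpack round-trip as a compact binary blob.
flow = { 'class' => 'Dynflow::Flows::Sequence', 'flows' => [1, 2] }
packed = MessagePack.pack(flow)
MessagePack.unpack(packed)
# => {"class"=>"Dynflow::Flows::Sequence", "flows"=>[1, 2]}
```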
data/examples/chunked_output_benchmark.rb
ADDED

```diff
@@ -0,0 +1,77 @@
+#!/usr/bin/env ruby
+# frozen_string_literal: true
+
+require_relative 'example_helper'
+require 'benchmark'
+
+WORDS = File.readlines('/usr/share/dict/words').map(&:chomp).freeze
+COUNT = WORDS.count
+
+module Common
+  def main_loop
+    if output[:current] < input[:limit]
+      consumed = yield
+      output[:current] += consumed
+      plan_event(nil)
+      suspend
+    end
+  end
+
+  def batch
+    WORDS.drop(output[:current]).take(input[:chunk])
+  end
+end
+
+class Regular < ::Dynflow::Action
+  include Common
+
+  def run(event = nil)
+    output[:current] ||= 0
+    output[:words] ||= []
+
+    main_loop do
+      words = batch
+      output[:words] << words
+      words.count
+    end
+  end
+end
+
+class Chunked < ::Dynflow::Action
+  include Common
+
+  def run(event = nil)
+    output[:current] ||= 0
+
+    main_loop do
+      words = batch
+      output_chunk(words)
+      words.count
+    end
+  end
+end
+
+if $0 == __FILE__
+  ExampleHelper.world.action_logger.level = 4
+  ExampleHelper.world.logger.level = 4
+
+  Benchmark.bm do |bm|
+    bm.report('regular 1000 by 100') { ExampleHelper.world.trigger(Regular, limit: 1000, chunk: 100).finished.wait }
+    bm.report('chunked 1000 by 100') { ExampleHelper.world.trigger(Chunked, limit: 1000, chunk: 100).finished.wait }
+
+    bm.report('regular 10_000 by 100') { ExampleHelper.world.trigger(Regular, limit: 10_000, chunk: 100).finished.wait }
+    bm.report('chunked 10_000 by 100') { ExampleHelper.world.trigger(Chunked, limit: 10_000, chunk: 100).finished.wait }
+
+    bm.report('regular 10_000 by 1000') { ExampleHelper.world.trigger(Regular, limit: 10_000, chunk: 1000).finished.wait }
+    bm.report('chunked 10_000 by 1000') { ExampleHelper.world.trigger(Chunked, limit: 10_000, chunk: 1000).finished.wait }
+
+    bm.report('regular 100_000 by 100') { ExampleHelper.world.trigger(Regular, limit: 100_000, chunk: 100).finished.wait }
+    bm.report('chunked 100_000 by 100') { ExampleHelper.world.trigger(Chunked, limit: 100_000, chunk: 100).finished.wait }
+
+    bm.report('regular 100_000 by 1000') { ExampleHelper.world.trigger(Regular, limit: 100_000, chunk: 1000).finished.wait }
+    bm.report('chunked 100_000 by 1000') { ExampleHelper.world.trigger(Chunked, limit: 100_000, chunk: 1000).finished.wait }
+
+    bm.report('regular 100_000 by 10_000') { ExampleHelper.world.trigger(Regular, limit: 100_000, chunk: 10_000).finished.wait }
+    bm.report('chunked 100_000 by 10_000') { ExampleHelper.world.trigger(Chunked, limit: 100_000, chunk: 10_000).finished.wait }
+  end
+end
```
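The benchmark contrasts the two approaches to incremental output: `Regular` appends every batch to `output[:words]`, so the ever-growing output is re-serialized on each suspend/resume cycle, while `Chunked` hands each batch to the new `output_chunk` API (see `data/lib/dynflow/action.rb` below), which is persisted separately via the `021_create_output_chunks` migration. Assuming a configured `example_helper`, it runs directly with `ruby examples/chunked_output_benchmark.rb`.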
data/extras/expand/main.go
ADDED

```diff
@@ -0,0 +1,180 @@
+package main
+
+import (
+	"encoding/csv"
+	"encoding/hex"
+	"encoding/json"
+	"github.com/vmihailenco/msgpack"
+	"io"
+	"os"
+)
+
+// dynflow_steps
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+// execution_plan_uuid,id,action_id,data,state,started_at,ended_at,real_time,execution_time,progress_done,progress_weight,class,error,action_class,children,queue
+//
+// encoded columns are:
+// 3 - data
+// 12 - error
+// 14 - children
+
+// dynflow_actions
+// 0 1 2 3 4 5 6 7 8 9 10
+// execution_plan_uuid,id,data,caller_execution_plan_id,caller_action_id,class,input,output,plan_step_id,run_step_id,finalize_step_id
+//
+// encoded columns are:
+// 2 - data
+// 6 - input
+// 7 - output
+
+// dynflow_execution_plans
+// Without msgpack
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
+// uuid,data,state,result,started_at,ended_at,real_time,execution_time,label,class,run_flow,finalize_flow,execution_history,root_plan_step_id,step_ids
+
+// With msgpack
+// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
+// uuid,data,state,result,started_at,ended_at,real_time,execution_time,label,class,root_plan_step_id,run_flow,finalize_flow,execution_history,step_ids
+//
+// 1 - data
+// 11 - run_flow
+// 12 - finalize_flow
+// 13 - execution_history
+// 14 - step_ids
+
+func main() {
+	reader := csv.NewReader(os.Stdin)
+	writer := csv.NewWriter(os.Stdout)
+	defer writer.Flush()
+
+	for {
+		record, err := reader.Read()
+		if err == io.EOF {
+			break
+		}
+
+		writer.Write(processRow(record))
+	}
+}
+
+func processRow(record []string) []string {
+	// Execution plan exports have 15 fields, other exports have different counts
+	if len(record) == 15 {
+		record = expandExecutionPlan(record)
+	}
+
+	for i, r := range record {
+		record[i] = reencodeField(r)
+	}
+
+	return record
+}
+
+func expandExecutionPlan(record []string) []string {
+	var flow_columns [2]int
+
+	// The step_ids field should be a safe indicator
+	if isHexEncoded(record[14]) {
+		flow_columns = [...]int{11, 12}
+	} else {
+		flow_columns = [...]int{10, 11}
+	}
+
+	for _, i := range flow_columns {
+		record[i] = expandFlow(record[i])
+	}
+	return record
+}
+
+func isHexEncoded(field string) bool {
+	return len(field) >= 2 && field[0:2] == "\\x"
+}
+
+func reencodeField(field string) string {
+	decoded, err := decode(field)
+	if err != nil {
+		return field
+	}
+
+	return encode(decoded)
+}
+
+func decode(field string) (interface{}, error) {
+	var intermediate interface{}
+	bytes := []byte(field)
+
+	if isHexEncoded(field) {
+		decoded_bytes, err := hex.DecodeString(field[2:])
+		if err != nil {
+			return "", err
+		}
+
+		err = msgpack.Unmarshal(decoded_bytes, &intermediate)
+		if err != nil {
+			return "", err
+		}
+
+		return intermediate, nil
+	}
+
+	err := json.Unmarshal(bytes, &intermediate)
+	if err != nil {
+		return "", err
+	}
+
+	return intermediate, nil
+}
+
+func encode(data interface{}) string {
+	result, err := json.Marshal(data)
+	if err != nil {
+		panic(err)
+	}
+
+	return string(result)
+}
+
+func expandFlow(field string) string {
+	intermediate, err := decode(field)
+	if err != nil {
+		return field
+	}
+
+	var result map[string]interface{}
+	switch intermediate.(type) {
+	// old style hash
+	case map[string]interface{}:
+		result = intermediate.(map[string]interface{})
+	// newer compact S-expression like representation
+	case []interface{}, float64:
+		result = expandCompactFlow(intermediate)
+	}
+
+	return encode(result)
+}
+
+func expandCompactFlow(flow interface{}) map[string]interface{} {
+	result := make(map[string]interface{})
+	switch flow.(type) {
+	case []interface{}:
+		switch flow.([]interface{})[0] {
+		case "S":
+			result["class"] = "Dynflow::Flows::Sequence"
+		case "C":
+			result["class"] = "Dynflow::Flows::Concurrence"
+		default:
+			panic("Unknown flow type")
+		}
+		var subflows []interface{}
+		for _, subflow := range flow.([]interface{})[1:] {
+			subflows = append(subflows, expandCompactFlow(subflow))
+		}
+		result["flows"] = subflows
+	case float64, int:
+		result["class"] = "Dynflow::Flows::Atom"
+		result["step_id"] = flow
+	default:
+		panic("Unknown flow type")
+	}
+	return result
+}
```
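To make the compact flow encoding concrete: `expandCompactFlow` turns the S-expression-like arrays (`"S"` for sequence, `"C"` for concurrence, bare integers for atomic steps) back into the old hash form. A hypothetical input and its expansion, written out in Ruby for illustration:

```ruby
compact = ["S", 1, ["C", 2, 3]]

expanded = {
  "class" => "Dynflow::Flows::Sequence",
  "flows" => [
    { "class" => "Dynflow::Flows::Atom", "step_id" => 1 },
    { "class" => "Dynflow::Flows::Concurrence",
      "flows" => [
        { "class" => "Dynflow::Flows::Atom", "step_id" => 2 },
        { "class" => "Dynflow::Flows::Atom", "step_id" => 3 }
      ] }
  ]
}
```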
data/lib/dynflow/action/suspended.rb
CHANGED

```diff
@@ -9,14 +9,14 @@ module Dynflow
       @step_id = action.run_step_id
     end
 
-    def plan_event(event, time, sent = Concurrent::Promises.resolvable_future)
-      @world.plan_event(execution_plan_id, step_id, event, time, sent)
+    def plan_event(event, time, sent = Concurrent::Promises.resolvable_future, optional: false)
+      @world.plan_event(execution_plan_id, step_id, event, time, sent, optional: optional)
     end
 
-    def event(event, sent = Concurrent::Promises.resolvable_future)
+    def event(event, sent = Concurrent::Promises.resolvable_future, optional: false)
       # TODO: deprecate 2 levels backtrace (to know it's called from clock or internaly)
       # remove lib/dynflow/clock.rb ClockReference#ping branch condition on removal.
-      plan_event(event, nil, sent)
+      plan_event(event, nil, sent, optional: optional)
     end
 
     def <<(event = nil)
```
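With the `optional:` flag, event delivery becomes best-effort: if no executor currently runs the plan, an optional event is discarded (see the director and client dispatcher changes below) instead of raising. A hedged sketch, assuming `suspended` is an `Action::Suspended` handle for a running plan:

```ruby
# Deliver :poll to the suspended step in ten seconds; if the plan has
# finished by then, the event is dropped instead of failing with
# "no manager for ...".
suspended.plan_event(:poll, Time.now + 10, optional: true)

# Same, but delivered as soon as possible.
suspended.event(:refresh, optional: true)
```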
data/lib/dynflow/action.rb
CHANGED

```diff
@@ -93,7 +93,8 @@ module Dynflow
       fields! execution_plan_id: String,
               step_id: Integer,
               event: Object,
-              time: type { variants Time, NilClass }
+              time: type { variants Time, NilClass },
+              optional: Algebrick::Types::Boolean
     end
 
     def self.constantize(action_name)
@@ -104,7 +105,8 @@ module Dynflow
 
     attr_reader :world, :phase, :execution_plan_id, :id, :input,
                 :plan_step_id, :run_step_id, :finalize_step_id,
-                :caller_execution_plan_id, :caller_action_id
+                :caller_execution_plan_id, :caller_action_id,
+                :pending_output_chunks
 
     middleware.use Action::Progress::Calculate
 
@@ -132,6 +134,7 @@ module Dynflow
 
       @input = OutputReference.deserialize getter.(:input, phase?(Run, Finalize, Present))
       @output = OutputReference.deserialize getter.(:output, false) if phase? Run, Finalize, Present
+      @pending_output_chunks = [] if phase? Run, Finalize
     end
 
     def phase?(*phases)
@@ -168,6 +171,14 @@ module Dynflow
       end
     end
 
+    def output_chunk(chunk, kind: nil, timestamp: Time.now)
+      @pending_output_chunks << { chunk: chunk, kind: kind, timestamp: timestamp }
+    end
+
+    def stored_output_chunks
+      @output_chunks ||= world.persistence.load_output_chunks(@execution_plan_id, @id)
+    end
+
     def caller_action
       phase! Present
       return nil if @caller_action_id
@@ -332,9 +343,9 @@ module Dynflow
 
     # Plan an +event+ to be send to the action defined by +action+, what defaults to be self.
     # if +time+ is not passed, event is sent as soon as possible.
-    def plan_event(event, time = nil, execution_plan_id: self.execution_plan_id, step_id: self.run_step_id)
+    def plan_event(event, time = nil, execution_plan_id: self.execution_plan_id, step_id: self.run_step_id, optional: false)
       time = @world.clock.current_time + time if time.is_a?(Numeric)
-      delayed_events << DelayedEvent[execution_plan_id, step_id, event, time]
+      delayed_events << DelayedEvent[execution_plan_id, step_id, event, time, optional]
     end
 
     def delayed_events
```
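A minimal sketch of the chunked-output API from an action's run phase; the action name and chunk contents are made up, the methods are the ones added above:

```ruby
class StreamOutput < ::Dynflow::Action
  def run(event = nil)
    # Buffered in pending_output_chunks; the chunks are persisted separately
    # instead of growing the action's main output on every update.
    output_chunk("fetched 100 lines", kind: 'stdout')
    # ... suspend / plan_event as in a usual polling action
  end
end

# In the Present phase, persisted chunks can be read back:
#   action.stored_output_chunks
# which loads them through world.persistence.load_output_chunks.
```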
data/lib/dynflow/clock.rb
CHANGED

```diff
@@ -114,11 +114,11 @@ module Dynflow
       Time.now
     end
 
-    def ping(who, time, with_what = nil, where = :<<)
+    def ping(who, time, with_what = nil, where = :<<, optional: false)
       Type! time, Time, Numeric
       time = current_time + time if time.is_a? Numeric
       if who.is_a?(Action::Suspended)
-        who.plan_event(with_what, time)
+        who.plan_event(with_what, time, optional: optional)
       else
         timer = Clock::Timer[who, time, with_what.nil? ? Algebrick::Types::None : Some[Object][with_what], where]
         self.tell([:add_timer, timer])
```
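The clock change threads the same flag through `ClockReference#ping`, so timeouts scheduled for suspended actions can be made best-effort too. A sketch, again assuming `suspended` is an `Action::Suspended` handle:

```ruby
# Fire :timeout at the suspended action in 60 seconds; with optional: true
# a plan that already finished simply never receives it.
world.clock.ping(suspended, 60, :timeout, optional: true)
```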
data/lib/dynflow/delayed_executors/abstract_core.rb
CHANGED

```diff
@@ -46,24 +46,21 @@ module Dynflow
 
       def process(delayed_plans, check_time)
         processed_plan_uuids = []
+        dispatched_plan_uuids = []
+        planning_locks = world.coordinator.find_records(class: Coordinator::PlanningLock.name)
         delayed_plans.each do |plan|
-          next if plan.frozen
+          next if plan.frozen || locked_for_planning?(planning_locks, plan)
           fix_plan_state(plan)
           with_error_handling do
             if plan.execution_plan.state != :scheduled
               # in case the previous process was terminated after running the plan, but before deleting the delayed plan record.
               @logger.info("Execution plan #{plan.execution_plan_uuid} is expected to be in 'scheduled' state, was '#{plan.execution_plan.state}', skipping")
-            elsif !plan.start_before.nil? && plan.start_before < check_time
-              @logger.debug "Failing plan #{plan.execution_plan_uuid}"
-              plan.timeout
+              processed_plan_uuids << plan.execution_plan_uuid
             else
               @logger.debug "Executing plan #{plan.execution_plan_uuid}"
-
-              Executors.run_user_code do
-                plan.execute
-              end
+              world.plan_request(plan.execution_plan_uuid)
+              dispatched_plan_uuids << plan.execution_plan_uuid
             end
-            processed_plan_uuids << plan.execution_plan_uuid
           end
         end
         world.persistence.delete_delayed_plans(:execution_plan_uuid => processed_plan_uuids) unless processed_plan_uuids.empty?
@@ -72,6 +69,7 @@ module Dynflow
       private
 
       # handle the case, where the process was termintated while planning was in progress before
+      # TODO: Doing execution plan updates in orchestrator is bad
       def fix_plan_state(plan)
         if plan.execution_plan.state == :planning
           @logger.info("Execution plan #{plan.execution_plan_uuid} is expected to be in 'scheduled' state, was '#{plan.execution_plan.state}', auto-fixing")
@@ -79,6 +77,10 @@ module Dynflow
           plan.execution_plan.save
         end
       end
+
+      def locked_for_planning?(planning_locks, plan)
+        planning_locks.any? { |lock| lock.execution_plan_id == plan.execution_plan_uuid }
+      end
     end
   end
 end
```
data/lib/dynflow/director.rb
CHANGED

```diff
@@ -15,7 +15,8 @@ module Dynflow
       execution_plan_id: String,
       step_id: Integer,
       event: Object,
-      result: Concurrent::Promises::ResolvableFuture
+      result: Concurrent::Promises::ResolvableFuture,
+      optional: Algebrick::Types::Boolean
     end
 
     UnprocessableEvent = Class.new(Dynflow::Error)
@@ -52,7 +53,7 @@ module Dynflow
     end
 
     def self.new_from_hash(hash, *_args)
-      self.new(hash[:execution_plan_id], hash[:queue])
+      self.new(hash[:execution_plan_id], hash[:queue], hash[:sender_orchestrator_id])
     end
   end
 
@@ -107,6 +108,26 @@ module Dynflow
       end
     end
 
+    class PlanningWorkItem < WorkItem
+      def execute
+        plan = world.persistence.load_delayed_plan(execution_plan_id)
+        return if plan.nil? || plan.execution_plan.state != :scheduled
+
+        if !plan.start_before.nil? && plan.start_before < Time.now.utc()
+          plan.timeout
+          return
+        end
+
+        world.coordinator.acquire(Coordinator::PlanningLock.new(world, plan.execution_plan_uuid)) do
+          plan.plan
+        end
+        plan.execute
+      rescue => e
+        world.logger.warn e.message
+        world.logger.debug e.backtrace.join("\n")
+      end
+    end
+
     class FinalizeWorkItem < WorkItem
       attr_reader :finalize_steps_data
 
@@ -146,12 +167,18 @@ module Dynflow
       @logger = world.logger
      @execution_plan_managers = {}
       @rescued_steps = {}
+      @planning_plans = []
     end
 
     def current_execution_plan_ids
       @execution_plan_managers.keys
     end
 
+    def handle_planning(execution_plan_uuid)
+      @planning_plans << execution_plan_uuid
+      [PlanningWorkItem.new(execution_plan_uuid, :default, @world.id)]
+    end
+
     def start_execution(execution_plan_id, finished)
       manager = track_execution_plan(execution_plan_id, finished)
       return [] unless manager
@@ -163,6 +190,9 @@ module Dynflow
       execution_plan_manager = @execution_plan_managers[event.execution_plan_id]
       if execution_plan_manager
         execution_plan_manager.event(event)
+      elsif event.optional
+        event.result.reject "no manager for #{event.inspect}"
+        []
       else
         raise Dynflow::Error, "no manager for #{event.inspect}"
       end
@@ -172,9 +202,16 @@ module Dynflow
     end
 
     def work_finished(work)
-      manager = @execution_plan_managers[work.execution_plan_id]
-      return [] unless manager # skip case when getting event from execution plan that is not running anymore
-      unless_done(manager, manager.what_is_next(work))
+      case work
+      when PlanningWorkItem
+        @planning_plans.delete(work.execution_plan_id)
+        @world.persistence.delete_delayed_plans(:execution_plan_uuid => work.execution_plan_id)
+        []
+      else
+        manager = @execution_plan_managers[work.execution_plan_id]
+        return [] unless manager # skip case when getting event from execution plan that is not running anymore
+        unless_done(manager, manager.what_is_next(work))
+      end
     end
 
     # called when there was an unhandled exception during the execution
```
data/lib/dynflow/dispatcher/client_dispatcher.rb
CHANGED

```diff
@@ -132,11 +132,13 @@ module Dynflow
     end
 
     def dispatch_request(request, client_world_id, request_id)
+      ignore_unknown = false
       executor_id = match request,
-                          (on ~Execution do |execution|
+                          (on ~Execution | ~Planning do |execution|
                              AnyExecutor
                            end),
                           (on ~Event do |event|
+                             ignore_unknown = event.optional
                              find_executor(event.execution_plan_id)
                            end),
                           (on Ping.(~any, ~any) | Status.(~any, ~any) do |receiver_id, _|
@@ -144,7 +146,11 @@ module Dynflow
                            end)
       envelope = Envelope[request_id, client_world_id, executor_id, request]
       if Dispatcher::UnknownWorld === envelope.receiver_id
-        raise Dynflow::Error, "Could not find an executor for #{envelope}"
+        raise Dynflow::Error, "Could not find an executor for #{envelope}" unless ignore_unknown
+
+        message = "Could not find an executor for optional #{envelope}, discarding."
+        log(Logger::DEBUG, message)
+        return respond(envelope, Failed[message])
       end
       connector.send(envelope).value!
     rescue => e
```