dynflow 1.4.9 → 1.6.3

Sign up to get free protection for your applications and to get access to all the features.
Files changed (54) hide show
  1. checksums.yaml +4 -4
  2. data/{test/prepare_travis_env.sh → .github/install_dependencies.sh} +2 -2
  3. data/.github/workflows/release.yml +48 -0
  4. data/.github/workflows/ruby.yml +116 -0
  5. data/Gemfile +1 -1
  6. data/dynflow.gemspec +1 -0
  7. data/examples/chunked_output_benchmark.rb +77 -0
  8. data/extras/expand/Dockerfile +9 -0
  9. data/extras/expand/README.md +25 -0
  10. data/extras/expand/go.mod +5 -0
  11. data/extras/expand/go.sum +11 -0
  12. data/extras/expand/main.go +66 -0
  13. data/lib/dynflow/action.rb +11 -1
  14. data/lib/dynflow/delayed_executors/abstract_core.rb +11 -9
  15. data/lib/dynflow/director.rb +37 -4
  16. data/lib/dynflow/dispatcher/client_dispatcher.rb +1 -1
  17. data/lib/dynflow/dispatcher/executor_dispatcher.rb +8 -0
  18. data/lib/dynflow/dispatcher.rb +5 -1
  19. data/lib/dynflow/execution_history.rb +1 -1
  20. data/lib/dynflow/execution_plan/hooks.rb +1 -1
  21. data/lib/dynflow/execution_plan/steps/abstract_flow_step.rb +1 -0
  22. data/lib/dynflow/execution_plan.rb +16 -5
  23. data/lib/dynflow/executors/abstract/core.rb +9 -0
  24. data/lib/dynflow/executors/parallel.rb +4 -0
  25. data/lib/dynflow/extensions/msgpack.rb +41 -0
  26. data/lib/dynflow/extensions.rb +6 -0
  27. data/lib/dynflow/flows/abstract.rb +14 -0
  28. data/lib/dynflow/flows/abstract_composed.rb +2 -7
  29. data/lib/dynflow/flows/atom.rb +2 -2
  30. data/lib/dynflow/flows/concurrence.rb +2 -0
  31. data/lib/dynflow/flows/registry.rb +32 -0
  32. data/lib/dynflow/flows/sequence.rb +2 -0
  33. data/lib/dynflow/flows.rb +1 -0
  34. data/lib/dynflow/persistence.rb +10 -0
  35. data/lib/dynflow/persistence_adapters/sequel.rb +51 -16
  36. data/lib/dynflow/persistence_adapters/sequel_migrations/021_create_output_chunks.rb +30 -0
  37. data/lib/dynflow/persistence_adapters/sequel_migrations/022_store_flows_as_msgpack.rb +90 -0
  38. data/lib/dynflow/persistence_adapters/sequel_migrations/023_sqlite_workarounds.rb +19 -0
  39. data/lib/dynflow/serializable.rb +2 -2
  40. data/lib/dynflow/testing/dummy_coordinator.rb +10 -0
  41. data/lib/dynflow/testing/dummy_planned_action.rb +4 -0
  42. data/lib/dynflow/testing/dummy_world.rb +2 -1
  43. data/lib/dynflow/testing.rb +1 -0
  44. data/lib/dynflow/version.rb +1 -1
  45. data/lib/dynflow/world.rb +12 -0
  46. data/lib/dynflow.rb +2 -1
  47. data/test/execution_plan_hooks_test.rb +36 -0
  48. data/test/extensions_test.rb +42 -0
  49. data/test/flows_test.rb +44 -0
  50. data/test/future_execution_test.rb +6 -3
  51. data/test/persistence_test.rb +2 -2
  52. data/web/views/flow_step.erb +1 -0
  53. metadata +42 -5
  54. data/.travis.yml +0 -33
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 4f677a2e5d7f119264258b7b36fe187203db60b8b4df4136beb73954857d59bd
4
- data.tar.gz: a7cd75185cc99ce4a1e3e18e07565542ebf24bec8109a3291e78c56345bbc0db
3
+ metadata.gz: 40e7ab36f1ef2943d1cbebccde747a2c89186a620493a74bfefb06a810e8bf13
4
+ data.tar.gz: c2d4da5a39382f4df50ac0a1664edab9b772f94fc6da4f7f5eec51c24b91e256
5
5
  SHA512:
6
- metadata.gz: c00baa2c020df6b035aa5a45aaa1b58a08ccc310752f4de0ab54da0207a03439cfde09132a7e8a6f4fe7911058a027e87f5cdb7b2563da035725ba5b482d0ab4
7
- data.tar.gz: 4189a3f752642000d4eda05a8d82013d350eb77f7325450dfa01e774672aa33c61f3b32a1c8d5eb75e030d2fae1cb42a1e92da3278278569ba250245b06c3aec
6
+ metadata.gz: 955818d3401e8641df7ad06ad0307791502437ac3b2549dee9d1161c8dd29d76e83f03b378b6c451547d2e1a00f0b8bbdfa282ea9b6e3fc37005e1fde59ebff6
7
+ data.tar.gz: 2cb868cab752e95928521d1276bca3091f3ebbd55437e80a82baf980c636880dea3bdb3b345252e2735ffe236960c9d9c6d78a471acc9462c8341581ec91ff98
@@ -1,5 +1,7 @@
1
1
  #!/usr/bin/env bash
2
2
 
3
+ set -x
4
+
3
5
  echo "Setting the environment to use ${DB} database"
4
6
 
5
7
  BUNDLE_CONFIG=.bundle/config
@@ -12,11 +14,9 @@ EOF
12
14
  case $DB in
13
15
  mysql)
14
16
  sed -i 's/:mysql//'g $BUNDLE_CONFIG
15
- mysql -e 'create database travis_ci_test;'
16
17
  ;;
17
18
  postgresql)
18
19
  sed -i 's/:postgresql//'g $BUNDLE_CONFIG
19
- psql -c 'create database travis_ci_test;' -U postgres
20
20
  ;;
21
21
  sqlite3)
22
22
  # the tests are by default using sqlite3: do nothing
@@ -0,0 +1,48 @@
1
+ # workflow name
2
+ name: Generate release-artifacts
3
+
4
+ # on events
5
+ on:
6
+ push:
7
+ tags:
8
+ - '*'
9
+
10
+ # workflow tasks
11
+ jobs:
12
+ generate:
13
+ name: Generate build artifacts
14
+ runs-on: ubuntu-latest
15
+ steps:
16
+ - uses: olegtarasov/get-tag@v2.1
17
+ id: tagName
18
+ with:
19
+ tagRegex: "v(.*)" # Optional. Returns specified group text as tag name. Full tag string is returned if regex is not defined.
20
+ tagRegexGroup: 1 # Optional. Default is 1.
21
+ - name: Checkout the repository
22
+ uses: actions/checkout@v2
23
+ - name: Generate build files
24
+ run: |
25
+ mkdir -p dist
26
+ cd extras/expand
27
+ go build -o ../../dist/dynflow-expand-${VERSION}-x86_64
28
+ env:
29
+ VERSION: '${{ steps.tagName.outputs.tag }}'
30
+ - name: Generate distribution tarball
31
+ run: |
32
+ cd extras/expand
33
+ go mod vendor
34
+ tar --create \
35
+ --gzip \
36
+ --file ../../dist/dynflow-expand-${VERSION}.tar.gz \
37
+ --transform s/^\./dynflow-expand-${VERSION}/ \
38
+ .
39
+ env:
40
+ VERSION: '${{ steps.tagName.outputs.tag }}'
41
+ - name: Upload binaries to release
42
+ uses: svenstaro/upload-release-action@v2
43
+ with:
44
+ repo_token: ${{ secrets.GITHUB_TOKEN }}
45
+ file: dist/*
46
+ tag: ${{ github.ref }}
47
+ overwrite: true
48
+ file_glob: true
@@ -0,0 +1,116 @@
1
+ # This workflow uses actions that are not certified by GitHub.
2
+ # They are provided by a third-party and are governed by
3
+ # separate terms of service, privacy policy, and support
4
+ # documentation.
5
+ # This workflow will download a prebuilt Ruby version, install dependencies and run tests with Rake
6
+ # For more information see: https://github.com/marketplace/actions/setup-ruby-jruby-and-truffleruby
7
+
8
+ name: Ruby
9
+
10
+ on: [pull_request]
11
+
12
+ env:
13
+ TESTOPTS: --verbose
14
+
15
+ jobs:
16
+ rubocop:
17
+ runs-on: ubuntu-latest
18
+ steps:
19
+ - uses: actions/checkout@v2
20
+ - name: Setup Ruby
21
+ uses: ruby/setup-ruby@v1
22
+ with:
23
+ ruby-version: 2.7
24
+ - name: Setup
25
+ run: |
26
+ gem install bundler
27
+ bundle install --jobs=3 --retry=3
28
+ - name: Run rubocop
29
+ run: bundle exec rubocop
30
+
31
+ test:
32
+ runs-on: ubuntu-latest
33
+ needs: rubocop
34
+ strategy:
35
+ fail-fast: false
36
+ matrix:
37
+ ruby_version:
38
+ - 2.5.0
39
+ - 2.6.0
40
+ - 2.7.0
41
+ - 3.0.0
42
+ concurrent_ruby_ext:
43
+ - 'true'
44
+ - 'false'
45
+ db:
46
+ - postgresql
47
+ - mysql
48
+ - sqlite3
49
+ include:
50
+ - db: postgresql
51
+ conn_string: postgres://postgres@localhost/travis_ci_test
52
+ - db: mysql
53
+ conn_string: mysql2://root@127.0.0.1/travis_ci_test
54
+ - db: sqlite3
55
+ conn_string: sqlite:/
56
+ exclude:
57
+ - db: mysql
58
+ ruby_version: 2.5.0
59
+ - db: mysql
60
+ ruby_version: 2.6.0
61
+ - db: mysql
62
+ ruby_version: 3.0.0
63
+ - db: mysql
64
+ concurrent_ruby_ext: 'true'
65
+ - db: sqlite3
66
+ ruby_version: 2.5.0
67
+ - db: sqlite3
68
+ ruby_version: 2.6.0
69
+ - db: sqlite3
70
+ ruby_version: 3.0.0
71
+ - db: sqlite3
72
+ concurrent_ruby_ext: 'true'
73
+ - db: postgresql
74
+ ruby_version: 2.5.0
75
+ concurrent_ruby_ext: 'true'
76
+ - db: postgresql
77
+ ruby_version: 2.6.0
78
+ concurrent_ruby_ext: 'true'
79
+ - db: postgresql
80
+ ruby_version: 3.0.0
81
+ concurrent_ruby_ext: 'true'
82
+
83
+ services:
84
+ postgres:
85
+ image: postgres:12.1
86
+ ports: ['5432:5432']
87
+ options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5
88
+ env:
89
+ POSTGRES_DB: travis_ci_test
90
+ mariadb:
91
+ image: mariadb:10
92
+ ports: ['3306:3306']
93
+ env:
94
+ MYSQL_ALLOW_EMPTY_PASSWORD: 'yes'
95
+ MYSQL_DATABASE: travis_ci_test
96
+ redis:
97
+ image: redis:latest
98
+ ports: ['6379:6379']
99
+
100
+ env:
101
+ DB: ${{ matrix.db }}
102
+ DB_CONN_STRING: ${{ matrix.conn_string }}
103
+ CONCURRENT_RUBY_EXT: "${{ matrix.concurrent_ruby_ext }}"
104
+
105
+ steps:
106
+ - uses: actions/checkout@v2
107
+ - name: Set up Ruby
108
+ # To automatically get bug fixes and new Ruby versions for ruby/setup-ruby,
109
+ # change this to (see https://github.com/ruby/setup-ruby#versioning):
110
+ uses: ruby/setup-ruby@v1
111
+ with:
112
+ ruby-version: ${{ matrix.ruby_version }}
113
+ - name: Install dependencies
114
+ run: .github/install_dependencies.sh
115
+ - name: Run tests
116
+ run: bundle exec rake test
data/Gemfile CHANGED
@@ -35,7 +35,7 @@ end
35
35
 
36
36
  group :rails do
37
37
  gem 'daemons'
38
- gem 'rails', '>= 4.2.9'
38
+ gem 'rails', '>= 4.2.9', '< 7'
39
39
  gem 'logging'
40
40
  end
41
41
 
data/dynflow.gemspec CHANGED
@@ -20,6 +20,7 @@ Gem::Specification.new do |s|
20
20
  s.required_ruby_version = '>= 2.3.0'
21
21
 
22
22
  s.add_dependency "multi_json"
23
+ s.add_dependency "msgpack", '~> 1.3', '>= 1.3.3'
23
24
  s.add_dependency "apipie-params"
24
25
  s.add_dependency "algebrick", '~> 0.7.0'
25
26
  s.add_dependency "concurrent-ruby", '~> 1.1.3'
@@ -0,0 +1,77 @@
1
+ #!/usr/bin/env ruby
2
+ # frozen_string_literal: true
3
+
4
+ require_relative 'example_helper'
5
+ require 'benchmark'
6
+
7
+ WORDS = File.readlines('/usr/share/dict/words').map(&:chomp).freeze
8
+ COUNT = WORDS.count
9
+
10
+ module Common
11
+ def main_loop
12
+ if output[:current] < input[:limit]
13
+ consumed = yield
14
+ output[:current] += consumed
15
+ plan_event(nil)
16
+ suspend
17
+ end
18
+ end
19
+
20
+ def batch
21
+ WORDS.drop(output[:current]).take(input[:chunk])
22
+ end
23
+ end
24
+
25
+ class Regular < ::Dynflow::Action
26
+ include Common
27
+
28
+ def run(event = nil)
29
+ output[:current] ||= 0
30
+ output[:words] ||= []
31
+
32
+ main_loop do
33
+ words = batch
34
+ output[:words] << words
35
+ words.count
36
+ end
37
+ end
38
+ end
39
+
40
+ class Chunked < ::Dynflow::Action
41
+ include Common
42
+
43
+ def run(event = nil)
44
+ output[:current] ||= 0
45
+
46
+ main_loop do
47
+ words = batch
48
+ output_chunk(words)
49
+ words.count
50
+ end
51
+ end
52
+ end
53
+
54
+ if $0 == __FILE__
55
+ ExampleHelper.world.action_logger.level = 4
56
+ ExampleHelper.world.logger.level = 4
57
+
58
+ Benchmark.bm do |bm|
59
+ bm.report('regular 1000 by 100') { ExampleHelper.world.trigger(Regular, limit: 1000, chunk: 100).finished.wait }
60
+ bm.report('chunked 1000 by 100') { ExampleHelper.world.trigger(Chunked, limit: 1000, chunk: 100).finished.wait }
61
+
62
+ bm.report('regular 10_000 by 100') { ExampleHelper.world.trigger(Regular, limit: 10_000, chunk: 100).finished.wait }
63
+ bm.report('chunked 10_000 by 100') { ExampleHelper.world.trigger(Chunked, limit: 10_000, chunk: 100).finished.wait }
64
+
65
+ bm.report('regular 10_000 by 1000') { ExampleHelper.world.trigger(Regular, limit: 10_000, chunk: 1000).finished.wait }
66
+ bm.report('chunked 10_000 by 1000') { ExampleHelper.world.trigger(Chunked, limit: 10_000, chunk: 1000).finished.wait }
67
+
68
+ bm.report('regular 100_000 by 100') { ExampleHelper.world.trigger(Regular, limit: 100_000, chunk: 100).finished.wait }
69
+ bm.report('chunked 100_000 by 100') { ExampleHelper.world.trigger(Chunked, limit: 100_000, chunk: 100).finished.wait }
70
+
71
+ bm.report('regular 100_000 by 1000') { ExampleHelper.world.trigger(Regular, limit: 100_000, chunk: 1000).finished.wait }
72
+ bm.report('chunked 100_000 by 1000') { ExampleHelper.world.trigger(Chunked, limit: 100_000, chunk: 1000).finished.wait }
73
+
74
+ bm.report('regular 100_000 by 10_000') { ExampleHelper.world.trigger(Regular, limit: 100_000, chunk: 10_000).finished.wait }
75
+ bm.report('chunked 100_000 by 10_000') { ExampleHelper.world.trigger(Chunked, limit: 100_000, chunk: 10_000).finished.wait }
76
+ end
77
+ end
@@ -0,0 +1,9 @@
1
+ FROM alpine:3.15 as builder
2
+ RUN apk add -U go
3
+ ADD ./ work/
4
+ RUN cd /work && \
5
+ go build
6
+
7
+ FROM scratch
8
+ COPY --from=builder /work/expand /expand
9
+ CMD ["/expand"]
@@ -0,0 +1,25 @@
1
+ # expand
2
+
3
+ For a long time, Dynflow's database schema remained stable. To optimize Dynflow
4
+ a bit, we started changing it. One of the changes was changing how we encode
5
+ flows, resulting in flows taking roughly 10x less space.
6
+
7
+ The other change is not merged yet, but has potentionally bigger impact. We
8
+ store certain columns as JSON objects. The upcoming change uses msgpack instead
9
+ of JSON, resulting in faster encoding and decoding times and smaller storage
10
+ footprint when encoded. The drawback is it is a binary format, so if someone
11
+ dumps the tables from DB as CSV, they won't be human readable.
12
+
13
+ This tool processes CSV DB dumps and decodes msgpack to json.
14
+
15
+ ## Usage
16
+
17
+ ```shell
18
+ # cat dynflow_execution_plans.csv
19
+ 2065cc55-6b03-44b7-947a-e999dcb9057f,,stopped,error,,2021-04-16 09:50:33.826,0,0,,Dynflow::ExecutionPlan,1,\x91a143,\x91a153,\x9283a474696d65ce60795de9a46e616d65a564656c6179a8776f726c645f6964d92435626536643435662d363732342d343666652d393035662d34363565316466346561306183a474696d65ce60795de9a46e616d65a774696d656f7574a8776f726c645f6964d92435626536643435662d363732342d343666652d393035662d343635653164663465613061,\x9101
20
+ 6667374a-beab-4b0b-80c8-3d0392cdde40,,scheduled,pending,,,0,,,Dynflow::ExecutionPlan,1,\x91a143,\x91a153,\x9183a474696d65ce60795de9a46e616d65a564656c6179a8776f726c645f6964d92435626536643435662d363732342d343666652d393035662d343635653164663465613061,\x9101
21
+
22
+ # expand < dynflow_execution_plans.csv
23
+ 2065cc55-6b03-44b7-947a-e999dcb9057f,,stopped,error,,2021-04-16 09:50:33.826,0,0,,Dynflow::ExecutionPlan,1,"[""C""]","[""S""]","[{""name"":""delay"",""time"":1618566633,""world_id"":""5be6d45f-6724-46fe-905f-465e1df4ea0a""},{""name"":""timeout"",""time"":1618566633,""world_id"":""5be6d45f-6724-46fe-905f-465e1df4ea0a""}]",[1]
24
+ 6667374a-beab-4b0b-80c8-3d0392cdde40,,scheduled,pending,,,0,,,Dynflow::ExecutionPlan,1,"[""C""]","[""S""]","[{""name"":""delay"",""time"":1618566633,""world_id"":""5be6d45f-6724-46fe-905f-465e1df4ea0a""}]",[1]
25
+ ```
@@ -0,0 +1,5 @@
1
+ module github.com/dynflow/dynflow/expand
2
+
3
+ go 1.15
4
+
5
+ require github.com/vmihailenco/msgpack/v5 v5.3.5
@@ -0,0 +1,11 @@
1
+ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
2
+ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
3
+ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
4
+ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
5
+ github.com/vmihailenco/msgpack v3.3.3+incompatible h1:wapg9xDUZDzGCNFlwc5SqI1rvcciqcxEHac4CYj89xI=
6
+ github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
7
+ github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
8
+ github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
9
+ github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
10
+ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
11
+ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -0,0 +1,66 @@
1
+ package main
2
+
3
+ import (
4
+ "encoding/csv"
5
+ "encoding/hex"
6
+ "encoding/json"
7
+ "github.com/vmihailenco/msgpack/v5"
8
+ "io"
9
+ "os"
10
+ )
11
+
12
+ func main() {
13
+ reader := csv.NewReader(os.Stdin)
14
+ defer os.Stdin.Close()
15
+
16
+ writer := csv.NewWriter(os.Stdout)
17
+ defer os.Stdout.Close()
18
+ defer writer.Flush()
19
+
20
+ for {
21
+ record, err := reader.Read()
22
+ if err == io.EOF {
23
+ break
24
+ }
25
+
26
+ writer.Write(processRow(record))
27
+ }
28
+ }
29
+
30
+ func processRow(record []string) []string {
31
+ for i, r := range record {
32
+ if isHexEncoded(r) {
33
+ record[i] = reencodeField(r)
34
+ }
35
+ }
36
+
37
+ return record
38
+ }
39
+
40
+ func isHexEncoded(field string) bool {
41
+ return len(field) >= 2 && field[0:2] == "\\x"
42
+ }
43
+
44
+ func reencodeField(field string) string {
45
+ decoded_bytes, err := hex.DecodeString(field[2:])
46
+ if err != nil {
47
+ return field
48
+ }
49
+
50
+ var intermediate interface{}
51
+ err = msgpack.Unmarshal(decoded_bytes, &intermediate)
52
+ if err != nil {
53
+ return field
54
+ }
55
+
56
+ return encode(intermediate)
57
+ }
58
+
59
+ func encode(data interface{}) string {
60
+ result, err := json.Marshal(data)
61
+ if err != nil {
62
+ panic(err)
63
+ }
64
+
65
+ return string(result)
66
+ }
@@ -105,7 +105,8 @@ module Dynflow
105
105
 
106
106
  attr_reader :world, :phase, :execution_plan_id, :id, :input,
107
107
  :plan_step_id, :run_step_id, :finalize_step_id,
108
- :caller_execution_plan_id, :caller_action_id
108
+ :caller_execution_plan_id, :caller_action_id,
109
+ :pending_output_chunks
109
110
 
110
111
  middleware.use Action::Progress::Calculate
111
112
 
@@ -133,6 +134,7 @@ module Dynflow
133
134
 
134
135
  @input = OutputReference.deserialize getter.(:input, phase?(Run, Finalize, Present))
135
136
  @output = OutputReference.deserialize getter.(:output, false) if phase? Run, Finalize, Present
137
+ @pending_output_chunks = [] if phase? Run, Finalize
136
138
  end
137
139
 
138
140
  def phase?(*phases)
@@ -169,6 +171,14 @@ module Dynflow
169
171
  end
170
172
  end
171
173
 
174
+ def output_chunk(chunk, kind: nil, timestamp: Time.now)
175
+ @pending_output_chunks << { chunk: chunk, kind: kind, timestamp: timestamp }
176
+ end
177
+
178
+ def stored_output_chunks
179
+ @output_chunks ||= world.persistence.load_output_chunks(@execution_plan_id, @id)
180
+ end
181
+
172
182
  def caller_action
173
183
  phase! Present
174
184
  return nil if @caller_action_id
@@ -46,24 +46,21 @@ module Dynflow
46
46
 
47
47
  def process(delayed_plans, check_time)
48
48
  processed_plan_uuids = []
49
+ dispatched_plan_uuids = []
50
+ planning_locks = world.coordinator.find_records(class: Coordinator::PlanningLock.name)
49
51
  delayed_plans.each do |plan|
50
- next if plan.frozen
52
+ next if plan.frozen || locked_for_planning?(planning_locks, plan)
51
53
  fix_plan_state(plan)
52
54
  with_error_handling do
53
55
  if plan.execution_plan.state != :scheduled
54
56
  # in case the previous process was terminated after running the plan, but before deleting the delayed plan record.
55
57
  @logger.info("Execution plan #{plan.execution_plan_uuid} is expected to be in 'scheduled' state, was '#{plan.execution_plan.state}', skipping")
56
- elsif !plan.start_before.nil? && plan.start_before < check_time
57
- @logger.debug "Failing plan #{plan.execution_plan_uuid}"
58
- plan.timeout
58
+ processed_plan_uuids << plan.execution_plan_uuid
59
59
  else
60
60
  @logger.debug "Executing plan #{plan.execution_plan_uuid}"
61
- Executors.run_user_code do
62
- plan.plan
63
- plan.execute
64
- end
61
+ world.plan_request(plan.execution_plan_uuid)
62
+ dispatched_plan_uuids << plan.execution_plan_uuid
65
63
  end
66
- processed_plan_uuids << plan.execution_plan_uuid
67
64
  end
68
65
  end
69
66
  world.persistence.delete_delayed_plans(:execution_plan_uuid => processed_plan_uuids) unless processed_plan_uuids.empty?
@@ -72,6 +69,7 @@ module Dynflow
72
69
  private
73
70
 
74
71
  # handle the case where the process was terminated while planning was in progress before
72
+ # TODO: Doing execution plan updates in orchestrator is bad
75
73
  def fix_plan_state(plan)
76
74
  if plan.execution_plan.state == :planning
77
75
  @logger.info("Execution plan #{plan.execution_plan_uuid} is expected to be in 'scheduled' state, was '#{plan.execution_plan.state}', auto-fixing")
@@ -79,6 +77,10 @@ module Dynflow
79
77
  plan.execution_plan.save
80
78
  end
81
79
  end
80
+
81
+ def locked_for_planning?(planning_locks, plan)
82
+ planning_locks.any? { |lock| lock.execution_plan_id == plan.execution_plan_uuid }
83
+ end
82
84
  end
83
85
  end
84
86
  end
@@ -53,7 +53,7 @@ module Dynflow
53
53
  end
54
54
 
55
55
  def self.new_from_hash(hash, *_args)
56
- self.new(hash[:execution_plan_id], hash[:queue])
56
+ self.new(hash[:execution_plan_id], hash[:queue], hash[:sender_orchestrator_id])
57
57
  end
58
58
  end
59
59
 
@@ -108,6 +108,26 @@ module Dynflow
108
108
  end
109
109
  end
110
110
 
111
+ class PlanningWorkItem < WorkItem
112
+ def execute
113
+ plan = world.persistence.load_delayed_plan(execution_plan_id)
114
+ return if plan.nil? || plan.execution_plan.state != :scheduled
115
+
116
+ if !plan.start_before.nil? && plan.start_before < Time.now.utc()
117
+ plan.timeout
118
+ return
119
+ end
120
+
121
+ world.coordinator.acquire(Coordinator::PlanningLock.new(world, plan.execution_plan_uuid)) do
122
+ plan.plan
123
+ end
124
+ plan.execute
125
+ rescue => e
126
+ world.logger.warn e.message
127
+ world.logger.debug e.backtrace.join("\n")
128
+ end
129
+ end
130
+
111
131
  class FinalizeWorkItem < WorkItem
112
132
  attr_reader :finalize_steps_data
113
133
 
@@ -147,12 +167,18 @@ module Dynflow
147
167
  @logger = world.logger
148
168
  @execution_plan_managers = {}
149
169
  @rescued_steps = {}
170
+ @planning_plans = []
150
171
  end
151
172
 
152
173
  def current_execution_plan_ids
153
174
  @execution_plan_managers.keys
154
175
  end
155
176
 
177
+ def handle_planning(execution_plan_uuid)
178
+ @planning_plans << execution_plan_uuid
179
+ [PlanningWorkItem.new(execution_plan_uuid, :default, @world.id)]
180
+ end
181
+
156
182
  def start_execution(execution_plan_id, finished)
157
183
  manager = track_execution_plan(execution_plan_id, finished)
158
184
  return [] unless manager
@@ -176,9 +202,16 @@ module Dynflow
176
202
  end
177
203
 
178
204
  def work_finished(work)
179
- manager = @execution_plan_managers[work.execution_plan_id]
180
- return [] unless manager # skip case when getting event from execution plan that is not running anymore
181
- unless_done(manager, manager.what_is_next(work))
205
+ case work
206
+ when PlanningWorkItem
207
+ @planning_plans.delete(work.execution_plan_id)
208
+ @world.persistence.delete_delayed_plans(:execution_plan_uuid => work.execution_plan_id)
209
+ []
210
+ else
211
+ manager = @execution_plan_managers[work.execution_plan_id]
212
+ return [] unless manager # skip case when getting event from execution plan that is not running anymore
213
+ unless_done(manager, manager.what_is_next(work))
214
+ end
182
215
  end
183
216
 
184
217
  # called when there was an unhandled exception during the execution
@@ -134,7 +134,7 @@ module Dynflow
134
134
  def dispatch_request(request, client_world_id, request_id)
135
135
  ignore_unknown = false
136
136
  executor_id = match request,
137
- (on ~Execution do |execution|
137
+ (on ~Execution | ~Planning do |execution|
138
138
  AnyExecutor
139
139
  end),
140
140
  (on ~Event do |event|
@@ -9,6 +9,7 @@ module Dynflow
9
9
 
10
10
  def handle_request(envelope)
11
11
  match(envelope.message,
12
+ on(Planning) { perform_planning(envelope, envelope.message)},
12
13
  on(Execution) { perform_execution(envelope, envelope.message) },
13
14
  on(Event) { perform_event(envelope, envelope.message) },
14
15
  on(Status) { get_execution_status(envelope, envelope.message) })
@@ -16,6 +17,13 @@ module Dynflow
16
17
 
17
18
  protected
18
19
 
20
+ def perform_planning(envelope, planning)
21
+ @world.executor.plan(planning.execution_plan_id)
22
+ respond(envelope, Accepted)
23
+ rescue Dynflow::Error => e
24
+ respond(envelope, Failed[e.message])
25
+ end
26
+
19
27
  def perform_execution(envelope, execution)
20
28
  allocate_executor(execution.execution_plan_id, envelope.sender_id, envelope.request_id)
21
29
  execution_lock = Coordinator::ExecutionLock.new(@world, execution.execution_plan_id, envelope.sender_id, envelope.request_id)
@@ -14,6 +14,10 @@ module Dynflow
14
14
  fields! execution_plan_id: String
15
15
  end
16
16
 
17
+ Planning = type do
18
+ fields! execution_plan_id: String
19
+ end
20
+
17
21
  Ping = type do
18
22
  fields! receiver_id: String,
19
23
  use_cache: type { variants TrueClass, FalseClass }
@@ -24,7 +28,7 @@ module Dynflow
24
28
  execution_plan_id: type { variants String, NilClass }
25
29
  end
26
30
 
27
- variants Event, Execution, Ping, Status
31
+ variants Event, Execution, Ping, Status, Planning
28
32
  end
29
33
 
30
34
  Response = Algebrick.type do
@@ -12,7 +12,7 @@ module Dynflow
12
12
 
13
13
  module Event
14
14
  def inspect
15
- "#{Time.at(time).utc}: #{name}".tap { |s| s << " @ #{world_id}" if world_id }
15
+ ["#{Time.at(time).utc}: #{name}", world_id].compact.join(' @ ')
16
16
  end
17
17
  end
18
18
 
@@ -21,7 +21,7 @@ module Dynflow
21
21
  # @param class_name [Class] class of the hook to be run
22
22
  # @param on [Symbol, Array<Symbol>] when should the hook be run, one of {HOOK_KINDS}
23
23
  # @return [void]
24
- def use(class_name, on: HOOK_KINDS)
24
+ def use(class_name, on: ExecutionPlan.states)
25
25
  on = Array[on] unless on.kind_of?(Array)
26
26
  validate_kinds!(on)
27
27
  if hooks[class_name]
@@ -31,6 +31,7 @@ module Dynflow
31
31
  action = persistence.load_action(self)
32
32
  yield action
33
33
  persistence.save_action(execution_plan_id, action)
34
+ persistence.save_output_chunks(execution_plan_id, action.id, action.pending_output_chunks)
34
35
  save
35
36
 
36
37
  return self