deimos-ruby 1.6.1 → 1.8.0.pre.beta1

Files changed (67)
  1. checksums.yaml +4 -4
  2. data/.circleci/config.yml +9 -0
  3. data/.rubocop.yml +15 -13
  4. data/.ruby-version +1 -1
  5. data/CHANGELOG.md +30 -0
  6. data/Gemfile.lock +87 -80
  7. data/README.md +139 -15
  8. data/Rakefile +1 -1
  9. data/deimos-ruby.gemspec +3 -2
  10. data/docs/ARCHITECTURE.md +144 -0
  11. data/docs/CONFIGURATION.md +27 -0
  12. data/lib/deimos.rb +7 -6
  13. data/lib/deimos/active_record_consume/batch_consumption.rb +159 -0
  14. data/lib/deimos/active_record_consume/batch_slicer.rb +27 -0
  15. data/lib/deimos/active_record_consume/message_consumption.rb +58 -0
  16. data/lib/deimos/active_record_consume/schema_model_converter.rb +52 -0
  17. data/lib/deimos/active_record_consumer.rb +33 -75
  18. data/lib/deimos/active_record_producer.rb +23 -0
  19. data/lib/deimos/batch_consumer.rb +2 -140
  20. data/lib/deimos/config/configuration.rb +28 -10
  21. data/lib/deimos/consume/batch_consumption.rb +148 -0
  22. data/lib/deimos/consume/message_consumption.rb +93 -0
  23. data/lib/deimos/consumer.rb +79 -69
  24. data/lib/deimos/kafka_message.rb +1 -1
  25. data/lib/deimos/kafka_source.rb +29 -23
  26. data/lib/deimos/kafka_topic_info.rb +1 -1
  27. data/lib/deimos/message.rb +6 -1
  28. data/lib/deimos/metrics/provider.rb +0 -2
  29. data/lib/deimos/poll_info.rb +9 -0
  30. data/lib/deimos/tracing/provider.rb +0 -2
  31. data/lib/deimos/utils/db_poller.rb +149 -0
  32. data/lib/deimos/utils/db_producer.rb +8 -3
  33. data/lib/deimos/utils/deadlock_retry.rb +68 -0
  34. data/lib/deimos/utils/lag_reporter.rb +19 -26
  35. data/lib/deimos/version.rb +1 -1
  36. data/lib/generators/deimos/db_poller/templates/migration +11 -0
  37. data/lib/generators/deimos/db_poller/templates/rails3_migration +16 -0
  38. data/lib/generators/deimos/db_poller_generator.rb +48 -0
  39. data/lib/tasks/deimos.rake +7 -0
  40. data/spec/active_record_batch_consumer_spec.rb +481 -0
  41. data/spec/active_record_consume/batch_slicer_spec.rb +42 -0
  42. data/spec/active_record_consume/schema_model_converter_spec.rb +105 -0
  43. data/spec/active_record_consumer_spec.rb +22 -11
  44. data/spec/active_record_producer_spec.rb +66 -88
  45. data/spec/batch_consumer_spec.rb +23 -7
  46. data/spec/config/configuration_spec.rb +4 -0
  47. data/spec/consumer_spec.rb +8 -8
  48. data/spec/deimos_spec.rb +57 -49
  49. data/spec/handlers/my_batch_consumer.rb +6 -1
  50. data/spec/handlers/my_consumer.rb +6 -1
  51. data/spec/kafka_source_spec.rb +53 -0
  52. data/spec/message_spec.rb +19 -0
  53. data/spec/producer_spec.rb +3 -3
  54. data/spec/rake_spec.rb +1 -1
  55. data/spec/schemas/com/my-namespace/MySchemaCompound-key.avsc +18 -0
  56. data/spec/schemas/com/my-namespace/Wibble.avsc +43 -0
  57. data/spec/spec_helper.rb +61 -6
  58. data/spec/utils/db_poller_spec.rb +320 -0
  59. data/spec/utils/deadlock_retry_spec.rb +74 -0
  60. data/spec/utils/lag_reporter_spec.rb +29 -22
  61. metadata +61 -20
  62. data/lib/deimos/base_consumer.rb +0 -104
  63. data/lib/deimos/utils/executor.rb +0 -124
  64. data/lib/deimos/utils/platform_schema_validation.rb +0 -0
  65. data/lib/deimos/utils/signal_handler.rb +0 -68
  66. data/spec/utils/executor_spec.rb +0 -53
  67. data/spec/utils/signal_handler_spec.rb +0 -16
data/README.md CHANGED
@@ -23,6 +23,7 @@ Built on Phobos and hence Ruby-Kafka.
 * [Consumers](#consumers)
 * [Rails Integration](#rails-integration)
 * [Database Backend](#database-backend)
+* [Database Poller](#database-poller)
 * [Running Consumers](#running-consumers)
 * [Metrics](#metrics)
 * [Testing](#testing)
@@ -313,24 +314,14 @@ messages as an array and then process them together. This can improve
 consumer throughput, depending on the use case. Batch consumers behave like
 other consumers in regards to key and payload decoding, etc.
 
-To enable batch consumption, ensure that the `delivery` property is set to `inline_batch`. For example:
+To enable batch consumption, ensure that the `delivery` property of your
+consumer is set to `inline_batch`.
 
-```ruby
-Deimos.configure do
-  consumer do
-    class_name 'Consumers::MyBatchConsumer'
-    topic 'my_batched_topic'
-    group_id 'my_group_id'
-    delivery :inline_batch
-  end
-end
-```
-
-Batch consumers must inherit from the Deimos::BatchConsumer class as in
-this sample:
+Batch consumers will invoke the `consume_batch` method instead of `consume`
+as in this example:
 
 ```ruby
-class MyBatchConsumer < Deimos::BatchConsumer
+class MyBatchConsumer < Deimos::Consumer
 
   def consume_batch(payloads, metadata)
     # payloads is an array of schema-decoded hashes.
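For orientation, a minimal self-contained sketch of such a 1.8-style batch consumer might look as follows (the schema, namespace and key field are placeholders, and the `:keys` metadata entry is an assumption about what Deimos passes for batches):

```ruby
class MyBatchConsumer < Deimos::Consumer
  schema 'MySchema'              # placeholder schema
  namespace 'com.my-namespace'   # placeholder namespace
  key_config field: 'test_id'

  def consume_batch(payloads, metadata)
    # payloads is an array of schema-decoded hashes; metadata is assumed
    # to carry the decoded keys for the batch under :keys.
    payloads.zip(metadata[:keys]).each do |payload, key|
      Deimos.config.logger.info("Received #{key}: #{payload.inspect}")
    end
  end
end
```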
@@ -532,12 +523,14 @@ class MyConsumer < Deimos::ActiveRecordConsumer
 
   # Optional override of the way to fetch records based on payload and
   # key. Default is to use the key to search the primary key of the table.
+  # Only used in non-batch mode.
   def fetch_record(klass, payload, key)
     super
   end
 
   # Optional override on how to set primary key for new records.
   # Default is to set the class's primary key to the message's decoded key.
+  # Only used in non-batch mode.
   def assign_key(record, payload, key)
     super
   end
@@ -545,6 +538,7 @@ class MyConsumer < Deimos::ActiveRecordConsumer
   # Optional override of the default behavior, which is to call `destroy`
   # on the record - e.g. you can replace this with "archiving" the record
   # in some way.
+  # Only used in non-batch mode.
   def destroy_record(record)
     super
   end
@@ -554,9 +548,136 @@ class MyConsumer < Deimos::ActiveRecordConsumer
   def record_attributes(payload)
     super.merge(:some_field => 'some_value')
   end
+
+  # Optional override to change the attributes used for identifying records.
+  def record_key(payload)
+    super
+  end
 end
 ```
 
+#### Batch Consumers
+
+Deimos also provides a batch consumption mode for `ActiveRecordConsumer` which
+processes groups of messages at once using the ActiveRecord backend.
+
+Batch ActiveRecord consumers make use of the
+[activerecord-import](https://github.com/zdennis/activerecord-import) gem to insert
+or update multiple records in bulk SQL statements. This reduces processing
+time at the cost of skipping ActiveRecord callbacks for individual records.
+Deleted records (tombstones) are grouped into `delete_all` calls and thus also
+skip `destroy` callbacks.
+
+Batch consumption is used when the `delivery` setting for your consumer is set to `inline_batch`.
+
+**Note**: Currently, batch consumption supports only primary keys as identifiers out of the box. See
+[the specs](spec/active_record_batch_consumer_spec.rb) for an example of how to use compound keys.
+
+By default, batches will be compacted before processing, i.e. only the last
+message for each unique key in a batch will actually be processed. To change
+this behaviour, call `compacted false` inside of your consumer definition
+(see the compaction sketch after the sample consumer below).
+
+A sample batch consumer would look as follows:
+
+```ruby
+class MyConsumer < Deimos::ActiveRecordConsumer
+  schema 'MySchema'
+  key_config field: 'my_field'
+  record_class Widget
+
+  # Controls whether the batch is compacted before consuming.
+  # If true, only the last message for each unique key in a batch will be
+  # processed.
+  # If false, messages will be grouped into "slices" of independent keys
+  # and each slice will be imported separately.
+  #
+  # compacted false
+
+  # Optional override of the default behavior, which is to call `delete_all`
+  # on the associated records - e.g. you can replace this with setting a deleted
+  # flag on the record.
+  def remove_records(records)
+    super
+  end
+
+  # Optional override to change the attributes of the record before they
+  # are saved.
+  def record_attributes(payload)
+    super.merge(:some_field => 'some_value')
+  end
+end
+```
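To make the compaction behaviour concrete, here is a plain-Ruby sketch (not Deimos API) of what compaction reduces a batch to:

```ruby
# A batch of [key, payload] pairs, oldest first; key 1 appears twice.
batch = [
  [1, { 'name' => 'A' }],
  [2, { 'name' => 'B' }],
  [1, { 'name' => 'C' }] # supersedes the first message for key 1
]

# Compaction keeps only the last message per key, preserving order:
compacted = batch.reverse.uniq(&:first).reverse
# => [[2, {"name"=>"B"}], [1, {"name"=>"C"}]]
```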
+## Database Poller
+
+Another method of pushing updates from the database to Kafka is to poll
+the database (a process popularized by [Kafka Connect](https://docs.confluent.io/current/connect/index.html)).
+Deimos provides a database poller, which gives you the same pattern but
+with all the flexibility of real Ruby code, and the added advantage of having
+a single consistent framework to talk to Kafka.
+
+One of the disadvantages of polling the database is that it can't detect deletions.
+You can work around this by configuring a mixin to send messages *only* on deletion,
+and using the poller to handle all other updates. You can reuse the same producer
+for both cases to handle joins, changes/mappings, business logic, etc.
+
626
+
627
+ ```ruby
628
+ rails g deimos:db_poller
629
+ ```
630
+
631
+ Run the migration:
632
+
633
+ ```ruby
634
+ rails db:migrate
635
+ ```
636
+
637
+ Add the following configuration:
638
+
639
+ ```ruby
640
+ Deimos.configure do
641
+ db_poller do
642
+ producer_class 'MyProducer' # an ActiveRecordProducer
643
+ end
644
+ db_poller do
645
+ producer_class 'MyOtherProducer'
646
+ run_every 2.minutes
647
+ delay 5.seconds # to allow for transactions to finish
648
+ full_table true # if set, dump the entire table every run; use for small tables
649
+ end
650
+ end
651
+ ```
652
+
653
+ All the information around connecting and querying the database lives in the
654
+ producer itself, so you don't need to write any additional code. You can
655
+ define one additional method on the producer:
656
+
657
+ ```ruby
658
+ class MyProducer < Deimos::ActiveRecordProducer
659
+ ...
660
+ def poll_query(time_from:, time_to:, column_name:, min_id:)
661
+ # Default is to use the timestamp `column_name` to find all records
662
+ # between time_from and time_to, or records where `updated_at` is equal to
663
+ # `time_from` but its ID is greater than `min_id`. This is called
664
+ # successively as the DB is polled to ensure even if a batch ends in the
665
+ # middle of a timestamp, we won't miss any records.
666
+ # You can override or change this behavior if necessary.
667
+ end
668
+ end
669
+ ```
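As a hedged example of such an override, assuming the default implementation returns an ActiveRecord relation (the `Widget` class and `deleted` column are invented for illustration):

```ruby
class MyProducer < Deimos::ActiveRecordProducer
  record_class Widget

  # Reuse the default time-window query but skip soft-deleted rows.
  def poll_query(time_from:, time_to:, column_name:, min_id:)
    super.where(deleted: false)
  end
end
```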
+
+To run the DB poller:
+
+    rake deimos:db_poller
+
+Note that the DB poller creates one thread per configured poller, and is
+currently designed *not* to be scaled out - i.e. it assumes you will only
+have one process running at a time. If a particular poll takes longer than
+the poll interval (e.g. the interval is set to 1 minute but a poll takes 75
+seconds), the next poll will begin immediately after the first one completes.
+
 ## Running consumers
 
 Deimos includes a rake task. Once it's in your gemfile, just run
@@ -783,6 +904,9 @@ Deimos::Utils::InlineConsumer.get_messages_for(
 
 Bug reports and pull requests are welcome on GitHub at https://github.com/flipp-oss/deimos .
 
+We have more information on the [internal architecture](docs/ARCHITECTURE.md) of Deimos
+for contributors!
+
 ### Linting
 
 Deimos uses Rubocop to lint the code. Please run Rubocop on your code
data/Rakefile CHANGED
@@ -6,7 +6,7 @@ begin
 
   RSpec::Core::RakeTask.new(:spec)
   task(default: :spec)
-rescue LoadError # rubocop:disable Lint/SuppressedException
+rescue LoadError
   # no rspec available
 end
 
data/deimos-ruby.gemspec CHANGED
@@ -21,11 +21,12 @@ Gem::Specification.new do |spec|
   spec.add_runtime_dependency('avro_turf', '~> 0.11')
   spec.add_runtime_dependency('phobos', '~> 1.9')
   spec.add_runtime_dependency('ruby-kafka', '~> 0.7')
+  spec.add_runtime_dependency('sigurd', '0.0.1')
 
   spec.add_development_dependency('activerecord', '~> 5.2')
   spec.add_development_dependency('activerecord-import')
   spec.add_development_dependency('avro', '~> 1.9')
-  spec.add_development_dependency('bundler', '~> 1')
+  spec.add_development_dependency('database_cleaner', '~> 1.7')
   spec.add_development_dependency('ddtrace', '~> 0.11')
   spec.add_development_dependency('dogstatsd-ruby', '~> 4.2')
   spec.add_development_dependency('guard', '~> 2')
@@ -33,7 +34,7 @@ Gem::Specification.new do |spec|
   spec.add_development_dependency('guard-rubocop', '~> 1')
   spec.add_development_dependency('mysql2', '~> 0.5')
   spec.add_development_dependency('pg', '~> 1.1')
-  spec.add_development_dependency('rails', '~> 5.2')
+  spec.add_development_dependency('rails', '~> 5.2', '>= 5.2.4.2')
   spec.add_development_dependency('rake', '~> 13')
   spec.add_development_dependency('rspec', '~> 3')
   spec.add_development_dependency('rspec_junit_formatter', '~>0.3')
data/docs/ARCHITECTURE.md ADDED
@@ -0,0 +1,144 @@
+# Deimos Architecture
+
+Deimos is the third of three libraries that add functionality on top of each
+other:
+
+* [RubyKafka](https://github.com/zendesk/ruby-kafka) is the low-level Kafka
+  client, providing APIs for producers, consumers and the client as a whole.
+* [Phobos](https://github.com/phobos/phobos) is a lightweight wrapper on top
+  of RubyKafka that provides threaded consumers, a simpler way to write
+  producers, and lifecycle management.
+* [Deimos](https://github.com/flipp-oss/deimos/) is a full-featured framework
+  using Phobos as its base which provides schema integration (e.g. Avro),
+  database integration, metrics, tracing, test helpers and other utilities.
+
+## Folder structure
+
+As of May 12, 2020, the following are the important files to understand in how
+Deimos fits together:
+* `lib/generators`: Generators to generate database migrations, e.g.
+  for the DB Poller and DB Producer features.
+* `lib/tasks`: Rake tasks for starting consumers, DB Pollers, etc.
+* `lib/deimos`: Main Deimos code.
+* `lib/deimos/deimos.rb`: The bootstrap / startup code for Deimos. Also provides
+  some global convenience methods and (for legacy purposes) the way to
+  start the DB Producer.
+* `lib/deimos/backends`: The different plug-in producer backends - e.g. produce
+  directly to Kafka, use the DB backend, etc.
+* `lib/deimos/schema_backends`: The different plug-in schema handlers, such
+  as the various flavors of Avro (with/without schema registry etc.)
+* `lib/deimos/metrics`: The different plug-in metrics providers, e.g. Datadog.
+* `lib/deimos/tracing`: The different plug-in tracing providers, e.g. Datadog.
+* `lib/deimos/utils`: Utility classes for things not directly related to
+  producing and consuming, such as the DB Poller, DB Producer, lag reporter, etc.
+* `lib/deimos/config`: Classes related to configuring Deimos.
+* `lib/deimos/monkey_patches`: Monkey patches to existing libraries. These
+  should be removed in a future update.
+
+## Features
+
+### Producers and Consumers
+
+Both producers and consumers include the `SharedConfig` module, which
+standardizes configuration like schema settings, topic, keys, etc.
+
+Consumers come in two flavors: `Consumer` and `BatchConsumer`. Both include
+`BaseConsumer` for shared functionality.
+
+While produced messages go to Kafka by default, literally anything else
+can happen when your producer calls `produce`, by swapping out the producer
+_backend_. This is just a class that needs to inherit from `Deimos::Backends::Base`
+and must implement a single method, `execute` (see the sketch below).
+
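A minimal sketch of a custom backend, assuming `execute` is a class method receiving the producer class and the batch of `Deimos::Message` objects (the class name and logging are illustrative):

```ruby
require 'deimos'

# Hedged sketch: a producer backend that just logs instead of publishing.
class LogBackend < Deimos::Backends::Base
  class << self
    def execute(producer_class:, messages:)
      messages.each do |message|
        Deimos.config.logger.info(
          "#{producer_class.topic}: #{message.payload.inspect}"
        )
      end
    end
  end
end
```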
+Producers have a complex workflow while processing the payload to publish. This
+is aided by the `Deimos::Message` class (not to be confused with the
+`KafkaMessage` class, which is an ActiveRecord model used by the DB Producer
+feature, below).
+
+### Schemas
+
+Schema backends are used to encode and decode payloads into different formats
+such as Avro. These are integrated with producers and consumers, as well
+as test helpers. These are a bit more involved than producer backends, and
+must define methods such as:
+* `encode` a payload or key (when encoding a key, for Avro a key schema
+  may be auto-generated)
+* `decode` a payload or key
+* `validate` that a payload is correct for encoding
+* `coerce` a payload into the given schema (e.g. turn ints into strings)
+* Get a list of `schema_fields` in the configured schema, used when interacting
+  with ActiveRecord
+* Define a `mock` backend to be used when the given backend is selected. This is
+  used during testing. Typically mock backends will validate values but not
+  actually encode/decode them.
+
+ ### Configuration
76
+
77
+ Deimos has its own `Configurable` module that makes heavy use of `method_missing`
78
+ to provide a very succinct but powerful configuration format (including
79
+ default values, procs, print out as hash, reset, etc.). It also
80
+ allows for multiple blocks to define different objects of the same time
81
+ (like producers, consumers, pollers etc.).
82
+
83
+ The configuration definition for Deimos is in `config/configuration.rb`. In
84
+ addition, there are methods in `config/phobos_config.rb` which translate to/from
85
+ the Phobos configuration format and support the old `phobos.yml` method
86
+ of configuration.
87
+
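A hedged sketch of what this DSL looks like in use (the module and setting names are invented; see `config/configuration.rb` for the real definitions):

```ruby
require 'deimos'
require 'logger'

# Illustrative only - mirrors the style of Deimos's own configuration.
module MyLib
  include Deimos::Configurable

  define_settings do
    setting :logger, Logger.new(STDOUT)  # a setting with a default value
    setting_object :poller do            # a repeatable block, like db_poller
      setting :producer_class
      setting :run_every, 60
    end
  end
end

MyLib.configure do
  logger Logger.new('app.log')
  poller { producer_class 'MyProducer' }
end
```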
+### Metrics and Tracing
+
+These are simpler than other plugins and must implement the expected methods
+(`increment`, `gauge`, `histogram` and `time` for metrics, and `start`, `finish`
+and `set_error` for tracing). These are used primarily in producers and consumers.
+
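A minimal sketch of a metrics provider using the method names listed above (the logging destination is illustrative):

```ruby
require 'deimos'

# Sketch: a metrics provider that writes to the Deimos logger.
class LogMetricsProvider < Deimos::Metrics::Provider
  def increment(metric_name, options={})
    Deimos.config.logger.debug("metric incr #{metric_name} #{options.inspect}")
  end

  def gauge(metric_name, count, options={})
    Deimos.config.logger.debug("metric gauge #{metric_name}=#{count}")
  end

  def histogram(metric_name, count, options={})
    Deimos.config.logger.debug("metric histogram #{metric_name}=#{count}")
  end

  def time(metric_name, options={})
    start = Time.now
    result = yield
    Deimos.config.logger.debug("metric time #{metric_name}=#{Time.now - start}s")
    result
  end
end
```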
+### ActiveRecord Integration
+
+Deimos provides an `ActiveRecordConsumer` and `ActiveRecordProducer`. These are
+relatively lightweight ways to save data into a database or read it off
+the database as part of app logic. They use things like the `coerce` method
+of the schema backends to manage the differences between the given payload
+and the configured schema for the topic.
+
+### Database Backend / Database Producer
+
+This feature (which provides better performance and transaction guarantees)
+is powered by two components:
+* The `db` _publish backend_, which saves messages to the database rather
+  than to Kafka;
+* The `DbProducer` utility, which runs as a separate process, pulls data
+  from the database and sends it to Kafka.
+
+There is a set of utility classes that power the producer, which are largely
+copied from Phobos:
+* `Executor` takes a set of "runnable" things (which implement a `start` and `stop`
+  method), puts them in a thread pool and runs them all concurrently. It
+  manages starting and stopping all threads when necessary.
+* `SignalHandler` wraps the Executor and handles SIGINT and SIGTERM signals
+  to stop the executor gracefully.
+
+In the case of this feature, the `DbProducer` is the runnable object - it
+can run several threads at once.
+
+On the database side, the `ActiveRecord` models that power this feature are:
+* `KafkaMessage`: The actual message, saved to the database. This message
+  is already encoded by the producer, so it only has to be sent.
+* `KafkaTopicInfo`: Used for locking topics so only one producer can work
+  on a topic at once.
+
+A Rake task (defined in `deimos.rake`) can be used to start the producer.
+
+### Database Poller
+
+This feature (which periodically polls the database to send Kafka messages)
+primarily uses other aspects of Deimos and hence is relatively small in size.
+The `DbPoller` class acts as a "runnable" and is used by an Executor (above).
+The `PollInfo` class is saved to the database to keep track of where each
+poller is up to.
+
+A Rake task (defined in `deimos.rake`) can be used to start the pollers.
+
+### Other Utilities
+
+The `utils` folder also contains the `LagReporter` (which sends metrics on
+lag) and the `InlineConsumer`, which can read data from a topic and directly
+pass it into a handler or save it to memory.
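A hedged usage sketch of `InlineConsumer`, with keyword arguments inferred from the README example referenced earlier in this diff (all values are placeholders):

```ruby
require 'deimos'

# Read a handful of messages off a topic and return them schema-decoded.
messages = Deimos::Utils::InlineConsumer.get_messages_for(
  topic: 'my-topic',
  schema: 'MySchema',
  namespace: 'com.my-namespace',
  key_config: { field: 'test_id' },
  num_messages: 5
)
```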
data/docs/CONFIGURATION.md CHANGED
@@ -58,6 +58,10 @@ Deimos.configure do
     namespace 'my.namespace'
     key_config field: :id
 
+    # Setting to :inline_batch will invoke consume_batch instead of consume
+    # for each batch of messages.
+    delivery :batch
+
     # If config.schema.path is app/schemas, assumes there is a file in
     # app/schemas/my/namespace/MyTopicSchema.avsc
   end
@@ -89,6 +93,29 @@ offset_commit_threshold|0|Number of messages that can be processed before their
 heartbeat_interval|10|Interval between heartbeats; must be less than the session window.
 backoff|`(1000..60_000)`|Range representing the minimum and maximum number of milliseconds to back off after a consumer error.
 
+## Defining Database Pollers
+
+These are used when polling the database via `rake deimos:db_poller`. You
+can create a number of pollers, one per topic.
+
+```ruby
+Deimos.configure do
+  db_poller do
+    producer_class 'MyProducer'
+    run_every 2.minutes
+  end
+end
+```
+
+Config name|Default|Description
+-----------|-------|-----------
+producer_class|nil|ActiveRecordProducer class to use for sending messages.
+run_every|60|Amount of time in seconds to wait between runs.
+timestamp_column|`:updated_at`|Name of the column to query. Remember to add an index to this column!
+delay_time|2|Amount of time in seconds to wait before picking up records, to allow for transactions to finish.
+full_table|false|If set to true, do a full table dump to Kafka each run. Good for very small tables.
+start_from_beginning|true|If false, start from the current time instead of the beginning of time if this is the first time running the poller.
+
 ## Kafka Configuration
 
 Config name|Default|Description
data/lib/deimos.rb CHANGED
@@ -28,9 +28,10 @@ if defined?(ActiveRecord)
   require 'deimos/kafka_source'
   require 'deimos/kafka_topic_info'
   require 'deimos/backends/db'
-  require 'deimos/utils/signal_handler.rb'
-  require 'deimos/utils/executor.rb'
+  require 'sigurd/signal_handler.rb'
+  require 'sigurd/executor.rb'
   require 'deimos/utils/db_producer.rb'
+  require 'deimos/utils/db_poller'
 end
 
 require 'deimos/utils/inline_consumer'
@@ -71,10 +72,10 @@ module Deimos
         Deimos::Utils::DbProducer.
           new(self.config.db_producer.logger || self.config.logger)
       end
-      executor = Deimos::Utils::Executor.new(producers,
-                                             sleep_seconds: 5,
-                                             logger: self.config.logger)
-      signal_handler = Deimos::Utils::SignalHandler.new(executor)
+      executor = Sigurd::Executor.new(producers,
+                                      sleep_seconds: 5,
+                                      logger: self.config.logger)
+      signal_handler = Sigurd::SignalHandler.new(executor)
       signal_handler.run!
     end
   end