bolt 0.19.0 → 0.19.1

Potentially problematic release.

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: a2227748b6143c5d3515d1c9f24faaf212eb30b7
- data.tar.gz: 5547e32cf4a0d588a201c4b011bb2c9fffae983f
+ metadata.gz: 67a9420306d6fd8eabb25f62ef540cd54105c9c4
+ data.tar.gz: 7247ca058c6d039f19a7967614c776b4d6fbaf09
  SHA512:
- metadata.gz: 4c628c28a11c9779918352684e2ce04ab3ff078ede27694f669312d63ac9329e1538ee870bb8238efb1cc81a8b2ef1e6d44af92f550173fe16f2d274cd1d0de1
- data.tar.gz: c0fdcf6f47139a80565fe393f9ad079dac585dab2cc5b490ea1db5e526be898878e0e544d0704504ca09a5ecfa9471611764f0089ea9610b2e64f2f0c49d11e5
+ metadata.gz: bbb8e273a0eb95d656003813d2b785043008edc828494663bf784111079e3065e2389e03c45b2ba9a4fe15c7536a1f59f4ed473b6d30634cec982b3c6c31af75
+ data.tar.gz: 4ae498d40e34ae98de03190b23099181f62db5fe02db1da84a34b591176d46828249aad0b6f8084448eb2d35f56c6e98f334c72d33845b4a64a2ed7e099c6fca
@@ -39,43 +39,44 @@ Puppet::Functions.create_function(:run_plan, Puppet::Functions::InternalFunction
  # The perspective of the environment is wanted here (for now) to not have to
  # require modules to have dependencies defined in meta data.
  loader = loaders.private_environment_loader
- if loader && (func = loader.load(:plan, plan_name))
- # TODO: Add profiling around this
- if (run_as = named_args['_run_as'])
- old_run_as = executor.run_as
- executor.run_as = run_as
- end

- begin
- # If the plan does not throw :return by calling the return function it's result is
- # undef/nil
- result = catch(:return) do
- func.class.dispatcher.dispatchers[0].call_by_name_with_scope(scope, params, true)
- nil
- end&.value
- # Validate the result is a PlanResult
- unless Puppet::Pops::Types::TypeParser.singleton.parse('Boltlib::PlanResult').instance?(result)
- raise Bolt::InvalidPlanResult.new(plan_name, result.to_s)
- end
- result
- rescue Puppet::PreformattedError => err
- if named_args['_catch_errors'] && err.cause.is_a?(Bolt::Error)
- result = err.cause.to_puppet_error
- else
- raise err
- end
- ensure
- if run_as
- executor.run_as = old_run_as
- end
- end
+ # TODO: Why would we not have a private_environment_loader?
+ unless loader && (func = loader.load(:plan, plan_name))
+ raise Puppet::ParseErrorWithIssue.from_issue_and_stack(
+ Puppet::Pops::Issues.issue(:UNKNOWN_PLAN) { Bolt::Error.unknown_plan(plan_name) }
+ )
+ end

- return result
+ # TODO: Add profiling around this
+ if (run_as = named_args['_run_as'])
+ old_run_as = executor.run_as
+ executor.run_as = run_as
+ end
+ result = nil
+ begin
+ # If the plan does not throw :return by calling the return function it's result is
+ # undef/nil
+ result = catch(:return) do
+ func.class.dispatcher.dispatchers[0].call_by_name_with_scope(scope, params, true)
+ nil
+ end&.value
+ # Validate the result is a PlanResult
+ unless Puppet::Pops::Types::TypeParser.singleton.parse('Boltlib::PlanResult').instance?(result)
+ raise Bolt::InvalidPlanResult.new(plan_name, result.to_s)
+ end
+ result
+ rescue Puppet::PreformattedError => err
+ if named_args['_catch_errors'] && err.cause.is_a?(Bolt::Error)
+ result = err.cause.to_puppet_error
+ else
+ raise err
+ end
+ ensure
+ if run_as
+ executor.run_as = old_run_as
+ end
  end

- # Could not find plan
- raise Puppet::ParseErrorWithIssue.from_issue_and_stack(
- Puppet::Pops::Issues.issue(:UNKNOWN_PLAN) { Bolt::Error.unknown_plan(plan_name) }
- )
+ result
  end
  end
@@ -243,6 +243,9 @@ Available options are:
  define('--format FORMAT', 'Output format to use: human or json') do |format|
  @options[:format] = format
  end
+ define('--[no-]color', 'Whether to show output in color') do |color|
+ @options[:color] = color
+ end
  define('-h', '--help', 'Display help') do |_|
  @options[:help] = true
  end
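
The new --[no-]color switch feeds the color config key handled further down in this release. A hedged usage sketch, assuming the 0.19.1 CLI; the node name is illustrative:

# Disable colored output for a single run (node name is hypothetical)
bolt command run 'uptime' --nodes node1.example.com --no-color

The same behavior can be made persistent by setting color: false in the configuration file that update_from_file reads.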
@@ -529,11 +532,20 @@ Available options are:
  end
  options[:task_options]['nodes'] = options[:nodes].join(',')
  end
- executor = Bolt::Executor.new(config, options[:noop], true)
+ params = options[:noop] ? options[:task_options].merge("_noop" => true) : options[:task_options]
+ plan_context = { plan_name: options[:object],
+ params: params }
+ plan_context[:description] = options[:description] if options[:description]
+
+ executor = Bolt::Executor.new(config, options[:noop])
+ executor.start_plan(plan_context)
  result = pal.run_plan(options[:object], options[:task_options], executor, inventory, puppetdb_client)
+
+ # If a non-bolt exeception bubbles up the plan won't get finished
+ # TODO: finish the plan once ORCH-2224
+ # executor.finish_plan(result)
  outputter.print_plan_result(result)
- # An exception would have been raised if the plan failed
- code = 0
+ code = result.ok? ? 0 : 1
  else
  executor = Bolt::Executor.new(config, options[:noop])
  targets = options[:targets]
@@ -614,7 +626,7 @@ Available options are:
  end

  def outputter
- @outputter ||= Bolt::Outputter.for_format(config[:format])
+ @outputter ||= Bolt::Outputter.for_format(config[:format], config[:color])
  end
  end
  end
@@ -30,6 +30,7 @@ module Bolt
  :log,
  :modulepath,
  :puppetdb,
+ :color,
  :transport,
  :transports
  ) do
@@ -39,7 +40,8 @@ module Bolt
  transport: 'ssh',
  format: 'human',
  modulepath: [],
- puppetdb: {}
+ puppetdb: {},
+ color: true
  }.freeze

  TRANSPORT_OPTIONS = %i[password run-as sudo-password extensions
@@ -129,8 +131,8 @@ module Bolt
  self[:modulepath] = data['modulepath'].split(File::PATH_SEPARATOR)
  end

- %w[inventoryfile concurrency format puppetdb].each do |key|
- if data[key]
+ %w[inventoryfile concurrency format puppetdb color].each do |key|
+ if data.key?(key)
  self[key.to_sym] = data[key]
  end
  end
@@ -150,8 +152,8 @@ module Bolt
  end

  def update_from_cli(options)
- %i[concurrency transport format modulepath inventoryfile].each do |key|
- self[key] = options[key] if options[key]
+ %i[concurrency transport format modulepath inventoryfile color].each do |key|
+ self[key] = options[key] if options.key?(key)
  end

  if options[:debug]
@@ -65,10 +65,6 @@ module Bolt
  end

  class PuppetError < Error
- def self.convert_puppet_errors(result)
- Bolt::Util.walk_vals(result) { |v| v.is_a?(Puppet::DataTypes::Error) ? from_error(v) : v }
- end
-
  def self.from_error(err)
  new(err.msg, err.kind, err.details, err.issue_code)
  end
@@ -16,10 +16,10 @@ module Bolt
  attr_reader :noop, :transports
  attr_accessor :run_as

- def initialize(config = Bolt::Config.new, noop = nil, plan_logging = false)
+ def initialize(config = Bolt::Config.new, noop = nil)
  @config = config
  @logger = Logging.logger[self]
- @plan_logging = plan_logging
+ @plan_logging = false

  @transports = Bolt::TRANSPORTS.each_with_object({}) do |(key, val), coll|
  coll[key.to_s] = Concurrent::Delay.new { val.new }
@@ -34,19 +34,12 @@ module Bolt

  def transport(transport)
  impl = @transports[transport || 'ssh']
+ raise(Bolt::UnknownTransportError, transport) unless impl
  # If there was an error creating the transport, ensure it gets thrown
  impl.no_error!
  impl.value
  end

- def summary(description, result)
- fc = result.error_set.length
- npl = result.length == 1 ? '' : 's'
- fpl = fc == 1 ? '' : 's'
- "Finished: #{description} on #{result.length} node#{npl} with #{fc} failure#{fpl}"
- end
- private :summary
-
  # Execute the given block on a list of nodes in parallel, one thread per "batch".
  #
  # This is the main driver of execution on a list of targets. It first
@@ -93,9 +86,27 @@ module Bolt
  ResultSet.new(promises.map(&:value))
  end

- # When running a plan, info messages like starting a task are promoted to notice.
- def log_action(msg)
- @plan_logging ? @logger.notice(msg) : @logger.info(msg)
+ def log_action(description, targets)
+ # When running a plan, info messages like starting a task are promoted to notice.
+ log_method = @plan_logging ? :notice : :info
+ target_str = if targets.length > 5
+ "#{targets.count} targets"
+ else
+ targets.map(&:uri).join(', ')
+ end
+
+ @logger.send(log_method, "Starting: #{description} on #{target_str}")
+
+ start_time = Time.now
+ results = yield
+ duration = Time.now - start_time
+
+ failures = results.error_set.length
+ plural = failures == 1 ? '' : 's'
+
+ @logger.send(log_method, "Finished: #{description} with #{failures} failure#{plural} in #{duration.round(2)} sec")
+
+ results
  end
  private :log_action

@@ -109,72 +120,89 @@

  def run_command(targets, command, options = {}, &callback)
  description = options.fetch('_description', "command '#{command}'")
- log_action("Starting: #{description} on #{targets.map(&:uri)}")
- notify = proc { |event| @notifier.notify(callback, event) if callback }
- options = { '_run_as' => run_as }.merge(options) if run_as
+ log_action(description, targets) do
+ notify = proc { |event| @notifier.notify(callback, event) if callback }
+ options = { '_run_as' => run_as }.merge(options) if run_as

- results = batch_execute(targets) do |transport, batch|
- with_node_logging("Running command '#{command}'", batch) do
- transport.batch_command(batch, command, options, &notify)
+ results = batch_execute(targets) do |transport, batch|
+ with_node_logging("Running command '#{command}'", batch) do
+ transport.batch_command(batch, command, options, &notify)
+ end
  end
- end

- log_action(summary(description, results))
- @notifier.shutdown
- results
+ @notifier.shutdown
+ results
+ end
  end

  def run_script(targets, script, arguments, options = {}, &callback)
  description = options.fetch('_description', "script #{script}")
- log_action("Starting: #{description} on #{targets.map(&:uri)}")
+ log_action(description, targets) do
+ notify = proc { |event| @notifier.notify(callback, event) if callback }
+ options = { '_run_as' => run_as }.merge(options) if run_as

- notify = proc { |event| @notifier.notify(callback, event) if callback }
- options = { '_run_as' => run_as }.merge(options) if run_as
-
- results = batch_execute(targets) do |transport, batch|
- with_node_logging("Running script #{script} with '#{arguments}'", batch) do
- transport.batch_script(batch, script, arguments, options, &notify)
+ results = batch_execute(targets) do |transport, batch|
+ with_node_logging("Running script #{script} with '#{arguments}'", batch) do
+ transport.batch_script(batch, script, arguments, options, &notify)
+ end
  end
- end

- log_action(summary(description, results))
- @notifier.shutdown
- results
+ @notifier.shutdown
+ results
+ end
  end

  def run_task(targets, task, arguments, options = {}, &callback)
  description = options.fetch('_description', "task #{task.name}")
- log_action("Starting: #{description} on #{targets.map(&:uri)}")
+ log_action(description, targets) do
+ notify = proc { |event| @notifier.notify(callback, event) if callback }
+ options = { '_run_as' => run_as }.merge(options) if run_as

- notify = proc { |event| @notifier.notify(callback, event) if callback }
- options = { '_run_as' => run_as }.merge(options) if run_as
-
- results = batch_execute(targets) do |transport, batch|
- with_node_logging("Running task #{task.name} with '#{arguments}' via #{task.input_method}", batch) do
- transport.batch_task(batch, task, arguments, options, &notify)
+ results = batch_execute(targets) do |transport, batch|
+ with_node_logging("Running task #{task.name} with '#{arguments}' via #{task.input_method}", batch) do
+ transport.batch_task(batch, task, arguments, options, &notify)
+ end
  end
- end

- log_action(summary(description, results))
- @notifier.shutdown
- results
+ @notifier.shutdown
+ results
+ end
  end

  def file_upload(targets, source, destination, options = {}, &callback)
  description = options.fetch('_description', "file upload from #{source} to #{destination}")
- log_action("Starting: #{description} on #{targets.map(&:uri)}")
- notify = proc { |event| @notifier.notify(callback, event) if callback }
- options = { '_run_as' => run_as }.merge(options) if run_as
+ log_action(description, targets) do
+ notify = proc { |event| @notifier.notify(callback, event) if callback }
+ options = { '_run_as' => run_as }.merge(options) if run_as

- results = batch_execute(targets) do |transport, batch|
- with_node_logging("Uploading file #{source} to #{destination}", batch) do
- transport.batch_upload(batch, source, destination, options, &notify)
+ results = batch_execute(targets) do |transport, batch|
+ with_node_logging("Uploading file #{source} to #{destination}", batch) do
+ transport.batch_upload(batch, source, destination, options, &notify)
+ end
  end
+
+ @notifier.shutdown
+ results
  end
+ end

- log_action(summary(description, results))
- @notifier.shutdown
- results
+ # Plan context doesn't make sense for most transports but it is tightly
+ # coupled with the orchestrator transport since the transport behaves
+ # differently when a plan is running. In order to limit how much this
+ # pollutes the transport API we only handle the orchestrator transport here.
+ # Since we callt this function without resolving targets this will result
+ # in the orchestrator transport always being initialized during plan runs.
+ # For now that's ok.
+ #
+ # In the future if other transports need this or if we want a plan stack
+ # we'll need to refactor.
+ def start_plan(plan_context)
+ transport('pcp').plan_context = plan_context
+ @plan_logging = true
+ end
+
+ def finish_plan(plan_result)
+ transport('pcp').finish_plan(plan_result)
  end
  end
  end
@@ -16,19 +16,29 @@ module Bolt

  Logging.init :debug, :info, :notice, :warn, :error, :fatal, :any

+ Logging.color_scheme(
+ 'bolt',
+ lines: {
+ notice: :green,
+ warn: :yellow,
+ error: :red,
+ fatal: %i[white on_red]
+ }
+ )
+ end
+
+ def self.configure(config)
  root_logger = Logging.logger[:root]
+
  root_logger.add_appenders Logging.appenders.stderr(
  'console',
- layout: default_layout,
+ layout: console_layout(config[:color]),
  level: default_level
  )
+
  # We set the root logger's level so that it logs everything but we do
  # limit what's actually logged in every appender individually.
  root_logger.level = :all
- end
-
- def self.configure(config)
- root_logger = Logging.logger[:root]

  config[:log].each_pair do |name, params|
  appender = Logging.appenders[name]
@@ -56,8 +66,16 @@ module Bolt
  end
  end

+ def self.console_layout(color)
+ color_scheme = :bolt if color
+ Logging.layouts.pattern(
+ pattern: '%m\n',
+ color_scheme: color_scheme
+ )
+ end
+
  def self.default_layout
- @default_layout ||= Logging.layouts.pattern(
+ Logging.layouts.pattern(
  pattern: '%d %-6l %c: %m\n',
  date_pattern: '%Y-%m-%dT%H:%M:%S.%6N'
  )
@@ -2,18 +2,19 @@

  module Bolt
  class Outputter
- def self.for_format(format)
+ def self.for_format(format, color)
  case format
  when 'human'
- Bolt::Outputter::Human.new
+ Bolt::Outputter::Human.new(color)
  when 'json'
- Bolt::Outputter::JSON.new
+ Bolt::Outputter::JSON.new(color)
  when nil
  raise "Cannot use outputter before parsing."
  end
  end

- def initialize(stream = $stdout)
+ def initialize(color, stream = $stdout)
+ @color = color
  @stream = stream
  end

@@ -11,7 +11,7 @@ module Bolt
  def print_head; end

  def colorize(color, string)
- if @stream.isatty
+ if @color && @stream.isatty
  "\033[#{COLORS[color]}m#{string}\033[0m"
  else
  string
@@ -3,11 +3,11 @@
  module Bolt
  class Outputter
  class JSON < Bolt::Outputter
- def initialize(stream = $stdout)
+ def initialize(color, stream = $stdout)
  @items_open = false
  @object_open = false
  @preceding_item = false
- super(stream)
+ super(color, stream)
  end

  def print_head
@@ -2,6 +2,7 @@

  require 'bolt/executor'
  require 'bolt/error'
+ require 'bolt/plan_result'

  module Bolt
  class PAL
@@ -12,7 +13,7 @@ module Bolt
  # Nothing works without initialized this global state. Reinitializing
  # is safe and in practice only happen in tests
  self.class.load_puppet
- self.class.configure_logging
+
  # This makes sure we don't accidentally create puppet dirs
  with_puppet_settings { |_| nil }

@@ -21,7 +22,10 @@ module Bolt

  # Puppet logging is global so this is class method to avoid confusion
  def self.configure_logging
- Puppet::Util::Log.newdestination(:console)
+ Puppet::Util::Log.newdestination(Logging.logger['Puppet'])
+ # Defer all log level decisions to the Logging library by telling Puppet
+ # to log everything
+ Puppet.settings[:log_level] = 'debug'
  end

  def self.load_puppet
@@ -39,6 +43,8 @@ module Bolt
  raise Bolt::CLIError, "Puppet must be installed to execute tasks"
  end

+ require 'bolt/pal/logging'
+
  # Now that puppet is loaded we can include puppet mixins in data types
  Bolt::ResultSet.include_iterable
  end
@@ -130,6 +136,7 @@ module Bolt
  end
  Puppet.settings.send(:clear_everything_for_tests)
  Puppet.initialize_settings(cli)
+ self.class.configure_logging
  yield
  end
  end
@@ -229,8 +236,10 @@ module Bolt
  def run_plan(plan_name, params, executor = nil, inventory = nil, pdb_client = nil)
  in_plan_compiler(executor, inventory, pdb_client) do |compiler|
  r = compiler.call_function('run_plan', plan_name, params)
- Bolt::PuppetError.convert_puppet_errors(r)
+ Bolt::PlanResult.from_pcore(r, 'success')
  end
+ rescue Bolt::Error => e
+ Bolt::PlanResult.new(e, 'failure')
  end
  end
  end
@@ -0,0 +1,28 @@
+ # frozen_string_literal: true
+
+ Puppet::Util::Log.newdesttype :logging do
+ match "Logging::Logger"
+
+ # Bolt log levels don't match exactly with Puppet log levels, so we use
+ # an explicit mapping.
+ def initialize(logger)
+ @external_logger = logger
+
+ @log_level_map = {
+ debug: :debug,
+ info: :info,
+ notice: :notice,
+ warning: :warn,
+ err: :error,
+ # Nothing in Puppet actually uses alert, emerg or crit, so it's hard to say
+ # what they indicate, but they sound pretty bad.
+ alert: :error,
+ emerg: :fatal,
+ crit: :fatal
+ }
+ end
+
+ def handle(log)
+ @external_logger.send(@log_level_map[log.level], log.to_s)
+ end
+ end
@@ -0,0 +1,40 @@
+ # frozen_string_literal: true
+
+ require 'json'
+ require 'bolt/error'
+ require 'bolt/util'
+
+ module Bolt
+ class PlanResult
+ attr_accessor :value, :status
+
+ # This must be called from inside a compiler
+ def self.from_pcore(result, status)
+ result = Bolt::Util.walk_vals(result) do |v|
+ if v.is_a?(Puppet::DataTypes::Error)
+ Bolt::PuppetError.from_error(v)
+ else
+ v
+ end
+ end
+ new(result, status)
+ end
+
+ def initialize(value, status)
+ @value = value
+ @status = status
+ end
+
+ def ok?
+ @status == 'success'
+ end
+
+ def ==(other)
+ value == other.value && status == other.status
+ end
+
+ def to_json(*args)
+ @value.to_json(*args)
+ end
+ end
+ end
@@ -5,6 +5,7 @@ require 'concurrent'
  require 'json'
  require 'orchestrator_client'
  require 'bolt/transport/base'
+ require 'bolt/transport/orch/connection'
  require 'bolt/result'

  module Bolt
@@ -15,6 +16,8 @@ module Bolt
  BOLT_SCRIPT_TASK = Struct.new(:name).new('bolt_shim::script').freeze
  BOLT_UPLOAD_TASK = Struct.new(:name).new('bolt_shim::upload').freeze

+ attr_writer :plan_context
+
  def self.options
  %w[service-url cacert token-file task-environment local-validation]
  end
@@ -26,30 +29,31 @@ module Bolt
  end
  end

- def create_client(opts)
- client_keys = %i[service-url token-file cacert]
- client_opts = opts.reduce({}) do |acc, (k, v)|
- if client_keys.include?(k)
- acc.merge(k.to_s => v)
- else
- acc
+ def initialize(*args)
+ @connections = {}
+ super
+ end
+
+ def finish_plan(result)
+ if result.is_a? Bolt::PlanResult
+ @connections.each_value do |conn|
+ begin
+ conn.finish_plan(result)
+ rescue StandardError => e
+ @logger.error("Failed to finish plan on #{conn.key}: #{e.message}")
+ end
  end
  end
- logger.debug("Creating orchestrator client for #{client_opts}")
-
- OrchestratorClient.new(client_opts, true)
  end

- def build_request(targets, task, arguments, description = nil)
- body = { task: task.name,
- environment: targets.first.options["task-environment"],
- noop: arguments['_noop'],
- params: arguments.reject { |k, _| k == '_noop' },
- scope: {
- nodes: targets.map(&:host)
- } }
- body[:description] = description if description
- body
+ # It's safe to create connections here for now because the
+ # batches/threads are per connection.
+ def get_connection(conn_opts)
+ key = Connection.get_key(conn_opts)
+ unless (conn = @connections[key])
+ conn = @connections[key] = Connection.new(conn_opts, @plan_context, logger)
+ end
+ conn
  end

  def process_run_results(targets, results)
@@ -134,22 +138,16 @@ module Bolt
  end

  def batches(targets)
- targets.group_by do |target|
- [target.options['task-environment'],
- target.options['service-url'],
- target.options['token-file']]
- end.values
+ targets.group_by { |target| Connection.get_key(target.options) }.values
  end

  def run_task_job(targets, task, arguments, options)
- body = build_request(targets, task, arguments, options['_description'])
-
  targets.each do |target|
  yield(type: :node_start, target: target) if block_given?
  end

  begin
- results = create_client(targets.first.options).run_task(body)
+ results = get_connection(targets.first.options).run_task(targets, task, arguments, options)

  process_run_results(targets, results)
  rescue OrchestratorClient::ApiError => e
@@ -0,0 +1,80 @@
+ # frozen_string_literal: true
+
+ module Bolt
+ module Transport
+ class Orch < Base
+ class Connection
+ attr_reader :logger, :key
+
+ CONTEXT_KEYS = Set.new(%i[plan_name description params]).freeze
+
+ def self.get_key(opts)
+ [
+ opts['service-url'],
+ opts['task-environment'],
+ opts['token-file']
+ ].join('-')
+ end
+
+ def initialize(opts, plan_context, logger)
+ @logger = logger
+ @key = self.class.get_key(opts)
+ client_keys = %w[service-url token-file cacert]
+ client_opts = client_keys.each_with_object({}) do |k, acc|
+ acc[k] = opts[k] if opts.include?(k)
+ end
+ logger.debug("Creating orchestrator client for #{client_opts}")
+
+ @client = OrchestratorClient.new(client_opts, true)
+ @plan_job = start_plan(plan_context)
+
+ @environment = opts["task-environment"]
+ end
+
+ def start_plan(plan_context)
+ if plan_context
+ begin
+ opts = plan_context.select { |k, _| CONTEXT_KEYS.include? k }
+ @client.command.plan_start(opts)['name']
+ rescue OrchestratorClient::ApiError => e
+ if e.code == '404'
+ @logger.debug("Orchestrator #{key} does not support plans")
+ else
+ @logger.error("Failed to start a plan with orchestrator #{key}: #{e.message}")
+ end
+ nil
+ end
+ end
+ end
+
+ def finish_plan(plan_result)
+ if @plan_job
+ @client.command.plan_finish(
+ plan_job: @plan_job,
+ result: plan_result.value,
+ status: plan_result.status
+ )
+ end
+ end
+
+ def build_request(targets, task, arguments, description = nil)
+ body = { task: task.name,
+ environment: @environment,
+ noop: arguments['_noop'],
+ params: arguments.reject { |k, _| k == '_noop' },
+ scope: {
+ nodes: targets.map(&:host)
+ } }
+ body[:description] = description if description
+ body[:plan_job] = @plan_job if @plan_job
+ body
+ end
+
+ def run_task(targets, task, arguments, options)
+ body = build_request(targets, task, arguments, options['_description'])
+ @client.run_task(body)
+ end
+ end
+ end
+ end
+ end
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module Bolt
- VERSION = '0.19.0'
+ VERSION = '0.19.1'
  end
@@ -0,0 +1,18 @@
+ {
+ "description": "Apply a single Puppet resource",
+ "input_method": "stdin",
+ "parameters": {
+ "type": {
+ "description": "The type of resource to apply",
+ "type": "String[1]"
+ },
+ "title": {
+ "description": "The title of the resource to apply",
+ "type": "String[1]"
+ },
+ "params": {
+ "description": "A map of parameter names and values to apply",
+ "type": "Optional[Hash[String[1], Data]]"
+ }
+ }
+ }
@@ -0,0 +1,98 @@
+ #!/opt/puppetlabs/puppet/bin/ruby
+ # frozen_string_literal: true
+
+ require 'json'
+ require 'puppet'
+
+ def apply_resource(type, title, params)
+ result = {
+ type: type,
+ title: title,
+ changed: false,
+ }
+
+ resource = Puppet::Resource.new(type, title, parameters: params)
+
+ # Configure the indirection to manage the resource locally and not cache it anywhere else
+ Puppet::Resource.indirection.terminus_class = :ral
+ Puppet::Resource.indirection.cache_class = nil
+
+ _saved_resource, report = Puppet::Resource.indirection.save(resource)
+
+ # This step is necessary to compute the report "status"
+ report.finalize_report
+
+ resource_status = report.resource_statuses.values.first
+
+ # Non-existent resource types and strange built-in types like Whit and Stage
+ # cause no resource to be managed.
+ unless resource_status
+ result[:_error] = { msg: "Invalid resource type #{type}",
+ kind: 'apply/type-not-found',
+ details: {} }
+ return result
+ end
+
+ # XXX currently ignoring noop and audit events
+ failures = resource_status.events.select { |event| event.status == 'failure' }
+ changes = resource_status.events.select { |event| event.status == 'success' }
+
+ if failures.any?
+ result[:failures] = failures.map do |event|
+ {
+ property: event.property,
+ previous_value: event.previous_value,
+ desired_value: event.desired_value,
+ message: event.message,
+ }
+ end
+ end
+
+ if changes.any?
+ result[:changes] = changes.map do |event|
+ {
+ property: event.property,
+ previous_value: event.previous_value,
+ desired_value: event.desired_value,
+ message: event.message,
+ }
+ end
+ end
+
+ result[:changed] = true if resource_status.changed?
+
+ if report.status == 'failed'
+ error_message = failures.map(&:message).join("\n").strip
+
+ result[:_error] = { msg: error_message,
+ kind: 'apply/resource-failure',
+ details: {} }
+ end
+
+ result
+ rescue StandardError => e
+ result[:_error] = {
+ msg: "Could not manage resource: #{e}",
+ kind: 'apply/unknown-error',
+ details: {},
+ }
+ result
+ end
+
+ args = JSON.parse(STDIN.read)
+
+ type = args['type']
+ title = args['title']
+ params = args['params'] || {}
+
+ # Required to find pluginsync'd plugins
+ Puppet.initialize_settings
+
+ Puppet[:noop] = false
+
+ result = apply_resource(type, title, params)
+ exitcode = result.key?(:_error) ? 1 : 0
+
+ puts result.to_json
+
+ exit exitcode
@@ -0,0 +1,14 @@
+ {
+ "description": "Manage the state of services (without a puppet agent)",
+ "input_method": "environment",
+ "parameters": {
+ "action": {
+ "description": "The operation (start, stop) to perform on the service",
+ "type": "Enum[start, stop, restart]"
+ },
+ "name": {
+ "description": "The name of the service to operate on.",
+ "type": "String[1]"
+ }
+ }
+ }
@@ -0,0 +1,45 @@
+ #!/bin/bash
+
+ action="$PT_action"
+ name="$PT_name"
+ service_managers[0]="systemctl"
+ service_managers[1]="service"
+ service_managers[2]="initctl"
+
+ # example cli /opt/puppetlabs/puppet/bin/bolt task run service::linux action=stop name=ntp --nodes localhost --modulepath /etc/puppetlabs/code/modules --password puppet --user root
+
+ check_command_exists() {
+ (which "$1") > /dev/null 2>&1
+ command_exists=$?
+ return $command_exists
+ }
+
+ for service_manager in "${service_managers[@]}"
+ do
+ check_command_exists "$service_manager"
+ command_exists=$?
+ if [ $command_exists -eq 0 ]; then
+ command_line="$service_manager $action $name"
+ if [ $service_manager == "service" ]; then
+ command_line="$service_manager $name $action"
+ fi
+ output=$($command_line 2>&1)
+ status_from_command=$?
+ # set up our status and exit code
+ if [ $status_from_command -eq 0 ]; then
+ echo "{ \"status\": \"$name $action\" }"
+ exit 0
+ else
+ # initd is special, starting an already started service is an error
+ if [[ $service_manager == "service" && "$output" == *"Job is already running"* ]]; then
+ echo "{ \"status\": \"$name $action\" }"
+ exit 0
+ fi
+ echo "{ \"status\": \"unable to run command '$command_line'\" }"
+ exit $status_from_command
+ fi
+ fi
+ done
+
+ echo "{ \"status\": \"No service managers found\" }"
+ exit 255
@@ -0,0 +1,14 @@
+ {
+ "description": "Manage the state of Windows services (without a puppet agent)",
+ "input_method": "powershell",
+ "parameters": {
+ "action": {
+ "description": "The operation (start, stop, restart, status) to perform on the service",
+ "type": "Enum[start, stop, restart, status]"
+ },
+ "name": {
+ "description": "The short name of the Windows service to operate on.",
+ "type": "String[1]"
+ }
+ }
+ }
@@ -0,0 +1,85 @@
+ [CmdletBinding()]
+ param(
+ # NOTE: init.json cannot yet be shared, so must have windows.json / windows.ps1
+ [Parameter(Mandatory = $true)]
+ [String]
+ $Name,
+
+ [Parameter(Mandatory = $true)]
+ [ValidateSet('start', 'stop', 'restart', 'status')]
+ [String]
+ $Action
+ )
+
+ $ErrorActionPreference = 'Stop'
+
+ function Invoke-ServiceAction($Service, $Action)
+ {
+ $inSyncStatus = 'in_sync'
+ $status = $null
+
+ switch ($Action)
+ {
+ 'start'
+ {
+ if ($Service.Status -eq 'Running') { $status = $InSyncStatus }
+ else { Start-Service $Service }
+ }
+ 'stop'
+ {
+ if ($Service.Status -eq 'Stopped') { $status = $InSyncStatus }
+ else { Stop-Service $Service }
+ }
+ 'restart'
+ {
+ Restart-Service $Service
+ $status = 'restarted'
+ }
+ # no-op since status always returned
+ 'status' { }
+ }
+
+ # user action
+ if ($status -eq $null)
+ {
+ # https://msdn.microsoft.com/en-us/library/system.serviceprocess.servicecontrollerstatus(v=vs.110).aspx
+ # ContinuePending, Paused, PausePending, Running, StartPending, Stopped, StopPending
+ $status = $Service.Status
+ if ($status -eq 'Running') { $status = 'started' }
+ }
+
+ return $status
+ }
+
+ try
+ {
+ $service = Get-Service -Name $Name
+ $status = Invoke-ServiceAction -Service $service -Action $action
+
+ # TODO: could use ConvertTo-Json, but that requires PS3
+ # if embedding in literal, should make sure Name / Status doesn't need escaping
+ Write-Host @"
+ {
+ "name" : "$($service.Name)",
+ "action" : "$Action",
+ "displayName" : "$($service.DisplayName)",
+ "status" : "$status",
+ "startType" : "$($service.StartType)"
+ }
+ "@
+ }
+ catch
+ {
+ Write-Host @"
+ {
+ "status" : "failure",
+ "name" : "$Name",
+ "action" : "$Action",
+ "_error" : {
+ "msg" : "Unable to perform '$Action' on '$Name': $($_.Exception.Message)",
+ "kind": "powershell_error",
+ "details" : {}
+ }
+ }
+ "@
+ }
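
A hedged sketch of running the new service::windows task over WinRM; the host, credentials, and service name are illustrative, not from the diff:

# Query the state of the Windows Time service (host and credentials are hypothetical)
bolt task run service::windows action=status name=W32Time --nodes winrm://win.example.com --user Administrator --password secret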
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: bolt
  version: !ruby/object:Gem::Version
- version: 0.19.0
+ version: 0.19.1
  platform: ruby
  authors:
  - Puppet
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2018-04-11 00:00:00.000000000 Z
+ date: 2018-04-27 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: addressable
@@ -86,14 +86,14 @@ dependencies:
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '0.2'
+ version: 0.2.4
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '0.2'
+ version: 0.2.4
  - !ruby/object:Gem::Dependency
  name: terminal-table
  requirement: !ruby/object:Gem::Requirement
@@ -347,6 +347,8 @@ files:
  - lib/bolt/outputter/human.rb
  - lib/bolt/outputter/json.rb
  - lib/bolt/pal.rb
+ - lib/bolt/pal/logging.rb
+ - lib/bolt/plan_result.rb
  - lib/bolt/puppetdb.rb
  - lib/bolt/puppetdb/client.rb
  - lib/bolt/puppetdb/config.rb
@@ -357,6 +359,7 @@ files:
  - lib/bolt/transport/local.rb
  - lib/bolt/transport/local/shell.rb
  - lib/bolt/transport/orch.rb
+ - lib/bolt/transport/orch/connection.rb
  - lib/bolt/transport/ssh.rb
  - lib/bolt/transport/ssh/connection.rb
  - lib/bolt/transport/winrm.rb
@@ -369,6 +372,8 @@ files:
  - modules/aggregate/lib/puppet/functions/aggregate/nodes.rb
  - modules/aggregate/plans/count.pp
  - modules/aggregate/plans/nodes.pp
+ - modules/apply/tasks/resource.json
+ - modules/apply/tasks/resource.rb
  - modules/canary/lib/puppet/functions/canary/merge.rb
  - modules/canary/lib/puppet/functions/canary/random_split.rb
  - modules/canary/lib/puppet/functions/canary/skip.rb
@@ -387,6 +392,10 @@ files:
  - modules/puppetdb_fact/plans/init.pp
  - modules/service/tasks/init.json
  - modules/service/tasks/init.rb
+ - modules/service/tasks/linux.json
+ - modules/service/tasks/linux.sh
+ - modules/service/tasks/windows.json
+ - modules/service/tasks/windows.ps1
  - vendored/facter/lib/facter.rb
  - vendored/facter/lib/facter/Cfkey.rb
  - vendored/facter/lib/facter/application.rb