bolt 0.16.1 → 0.16.2

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of bolt might be problematic. Click here for more details.

@@ -1,7 +1,7 @@
1
1
  module Bolt
2
2
  class Inventory
3
3
  # Group is a specific implementation of Inventory based on nested
4
- # structred data.
4
+ # structured data.
5
5
  class Group
6
6
  attr_accessor :name, :nodes, :groups, :config, :rest
7
7
 
@@ -30,29 +30,45 @@ module Bolt
30
30
  @rest = data.reject { |k, _| %w[name nodes config groups].include? k }
31
31
  end
32
32
 
33
- def validate(used_names = [], depth = 0)
34
- raise ValidationError.new("Group does not have a name", nil) unless @name
33
+ def validate(used_names = Set.new, node_names = Set.new, depth = 0)
34
+ raise ValidationError.new("Group does not have a name", nil) unless @name
35
35
  if used_names.include?(@name)
36
36
  raise ValidationError.new("Tried to redefine group #{@name}", @name)
37
37
  end
38
38
  raise ValidationError.new("Invalid Group name #{@name}", @name) unless @name =~ /\A[a-z0-9_]+\Z/
39
39
 
40
+ if node_names.include?(@name)
41
+ raise ValidationError.new("Group #{@name} conflicts with node of the same name", @name)
42
+ end
40
43
  raise ValidationError.new("Group #{@name} is too deeply nested", @name) if depth > 1
41
44
 
42
45
  used_names << @name
43
46
 
44
47
  @nodes.each do |n|
45
- raise ValidationError.new("node #{n['name']} does not have a name", @name) unless n['name']
48
+ # Require nodes to be referenced only by their host name
49
+ host = Addressable::URI.parse('//' + n['name']).host
50
+ ipv6host = Addressable::URI.parse('//[' + n['name'] + ']').host
51
+ if n['name'] != host && n['name'] != ipv6host
52
+ raise ValidationError.new("Invalid node name #{n['name']}", n['name'])
53
+ end
54
+
55
+ raise ValidationError.new("Node #{n['name']} does not have a name", n['name']) unless n['name']
56
+ if used_names.include?(n['name'])
57
+ raise ValidationError.new("Group #{n['name']} conflicts with node of the same name", n['name'])
58
+ end
59
+
60
+ node_names << n['name']
46
61
  end
47
62
 
48
63
  @groups.each do |g|
49
64
  begin
50
- g.validate(used_names, depth + 1)
65
+ g.validate(used_names, node_names, depth + 1)
51
66
  rescue ValidationError => e
52
67
  e.add_parent(@name)
53
68
  raise e
54
69
  end
55
70
  end
71
+
56
72
  nil
57
73
  end
58
74
 
@@ -92,9 +108,24 @@ module Bolt
92
108
  }
93
109
  end
94
110
 
111
+ # Returns all nodes contained within the group, which includes nodes from subgroups.
95
112
  def node_names
113
+ @groups.inject(local_node_names) do |acc, g|
114
+ acc.merge(g.node_names)
115
+ end
116
+ end
117
+
118
+ # Return a mapping of group names to group.
119
+ def collect_groups
120
+ @groups.inject(name => self) do |acc, g|
121
+ acc.merge(g.collect_groups)
122
+ end
123
+ end
124
+
125
+ def local_node_names
96
126
  @_node_names ||= Set.new(nodes.map { |n| n['name'] })
97
127
  end
128
+ private :local_node_names
98
129
 
99
130
  def node_collect(node_name)
100
131
  data = @groups.inject(nil) do |acc, g|
@@ -118,7 +149,7 @@ module Bolt
118
149
 
119
150
  if data
120
151
  data_merge(group_data, data)
121
- elsif node_names.include?(node_name)
152
+ elsif local_node_names.include?(node_name)
122
153
  group_data
123
154
  end
124
155
  end
@@ -37,6 +37,22 @@ module Bolt
37
37
 
38
38
  # Now that puppet is loaded we can include puppet mixins in data types
39
39
  Bolt::ResultSet.include_iterable
40
+
41
+ # TODO: This is a hack for PUP-8441 remove it once that is fixed
42
+ require_relative '../../vendored/puppet/lib/puppet/datatypes/impl/error.rb'
43
+ Puppet::DataTypes::Error.class_eval do
44
+ def to_json(opts = nil)
45
+ _pcore_init_hash.to_json(opts)
46
+ end
47
+ end
48
+ end
49
+
50
+ # Create a top-level alias for TargetSpec so that users don't have to
51
+ # namespace it with Boltlib, which is just an implementation detail. This
52
+ # allows TargetSpec to feel like a built-in type in bolt, rather than
53
+ # something that has been, no pun intended, "bolted on".
54
+ def add_target_spec(compiler)
55
+ compiler.evaluate_string('type TargetSpec = Boltlib::TargetSpec')
40
56
  end
41
57
 
42
58
  # Runs a block in a PAL script compiler configured for Bolt. Catches
@@ -46,14 +62,9 @@ module Bolt
46
62
  Puppet.initialize_settings(opts)
47
63
  r = Puppet::Pal.in_tmp_environment('bolt', modulepath: [BOLTLIB_PATH] + @config[:modulepath], facts: {}) do |pal|
48
64
  pal.with_script_compiler do |compiler|
65
+ add_target_spec(compiler)
49
66
  begin
50
- result = yield compiler
51
- # TODO: remove after PUP-8441 adds to_json to Errors
52
- # This hack won't handle nested errors
53
- if result.is_a? Puppet::DataTypes::Error
54
- result = result._pcore_init_hash
55
- end
56
- result
67
+ yield compiler
57
68
  rescue Puppet::PreformattedError => err
58
69
  # Puppet sometimes rescues exceptions notes the location and reraises
59
70
  # For now return the original error. Exception cause support was added in Ruby 2.1
@@ -131,7 +142,11 @@ module Bolt
131
142
  task = in_bolt_compiler do |compiler|
132
143
  compiler.task_signature(task_name)
133
144
  end
134
- raise Bolt::CLIError, "Could not find task #{task_name} in your modulepath" if task.nil?
145
+
146
+ if task.nil?
147
+ raise Bolt::CLIError, Bolt::Error.unknown_task(task_name)
148
+ end
149
+
135
150
  task.task_hash
136
151
  end
137
152
 
@@ -145,7 +160,11 @@ module Bolt
145
160
  plan = in_bolt_compiler do |compiler|
146
161
  compiler.plan_signature(plan_name)
147
162
  end
148
- raise Bolt::CLIError, "Could not find plan #{plan_name} in your modulepath" if plan.nil?
163
+
164
+ if plan.nil?
165
+ raise Bolt::CLIError, Bolt::Error.unknown_plan(plan_name)
166
+ end
167
+
149
168
  elements = plan.params_type.elements
150
169
  {
151
170
  'name' => plan_name,
@@ -10,14 +10,6 @@ module Bolt
10
10
  new(hash['uri'], hash['options'])
11
11
  end
12
12
 
13
- def self.from_uri(uri)
14
- new(uri)
15
- end
16
-
17
- def self.parse_urls(urls)
18
- urls.split(/[[:space:],]+/).reject(&:empty?).uniq.map { |url| from_uri(url) }
19
- end
20
-
21
13
  def initialize(uri, options = nil)
22
14
  @uri = uri
23
15
  @uri_obj = parse(uri)
@@ -0,0 +1,159 @@
1
+ require 'logging'
2
+
3
+ module Bolt
4
+ module Transport
5
+ # This class provides the default behavior for Transports. A Transport is
6
+ # responsible for uploading files and running commands, scripts, and tasks
7
+ # on Targets.
8
+ #
9
+ # Bolt executes work on the Transport in "batches". To do that, it calls
10
+ # the batches() method, which is responsible for dividing the list of
11
+ # Targets into batches according to how it wants to handle them. It will
12
+ # then call Transport#batch_task, or the corresponding method for another
13
+ # operation, passing a list of Targets. The Transport returns a list of
14
+ # Bolt::Result objects, one per Target. Each batch is executed on a
15
+ # separate thread, controlled by the `concurrency` setting, so many batches
16
+ # may be running in parallel.
17
+ #
18
+ # The default batch implementation splits the list of Targets into batches
19
+ # of 1. It then calls run_task(), or a corresponding method for other
20
+ # operations, passing in the single Target.
21
+ #
22
+ # Most Transport implementations, like the SSH and WinRM transports, don't
23
+ # need to do their own batching, since they only operate on a single Target
24
+ # at a time. Those Transports can implement the run_task() and related
25
+ # methods, which will automatically handle running many Targets in
26
+ # parallel, and will handle publishing start and finish events for each
27
+ # Target.
28
+ #
29
+ # Transports that need their own batching, like the Orch transport, can
30
+ # instead override the batches() method to split Targets into sets that can
31
+ # be executed together, and override the batch_task() and related methods
32
+ # to execute a batch of nodes. In that case, those Transports should accept
33
+ # a block argument and call it with a :node_start event for each Target
34
+ # before executing, and a :node_result event for each Target after
35
+ # execution.
36
+ class Base
37
+ attr_reader :logger
38
+
39
+ def initialize(_config)
40
+ @logger = Logging.logger[self]
41
+ end
42
+
43
+ def with_events(target, callback)
44
+ callback.call(type: :node_start, target: target) if callback
45
+ result = yield
46
+ @logger.debug("Result on #{target.uri}: #{JSON.dump(result.value)}")
47
+ callback.call(type: :node_result, result: result) if callback
48
+ result
49
+ rescue StandardError => ex
50
+ Bolt::Result.from_exception(target, ex)
51
+ end
52
+
53
+ def filter_options(target, options)
54
+ if target.options[:run_as]
55
+ options.reject { |k, _v| k == '_run_as' }
56
+ else
57
+ options
58
+ end
59
+ end
60
+
61
+ # Raises an error if more than one target was given in the batch.
62
+ #
63
+ # The default implementations of batch_* strictly assume the transport is
64
+ # using the default batch size of 1. This method ensures that is the
65
+ # case and raises an error if it's not.
66
+ def assert_batch_size_one(method, targets)
67
+ if targets.length > 1
68
+ message = "#{self.class.name} must implement #{method} to support batches (got #{targets.length} nodes)"
69
+ raise NotImplementedError, message
70
+ end
71
+ end
72
+
73
+ # Runs the given task on a batch of nodes.
74
+ #
75
+ # The default implementation only supports batches of size 1 and will fail otherwise.
76
+ #
77
+ # Transports may override this method to implement their own batch processing.
78
+ def batch_task(targets, task, arguments, options = {}, &callback)
79
+ assert_batch_size_one("batch_task()", targets)
80
+ target = targets.first
81
+ with_events(target, callback) do
82
+ @logger.debug { "Running task run '#{task}' on #{target.uri}" }
83
+ run_task(target, task, arguments, filter_options(target, options))
84
+ end
85
+ end
86
+
87
+ # Runs the given command on a batch of nodes.
88
+ #
89
+ # The default implementation only supports batches of size 1 and will fail otherwise.
90
+ #
91
+ # Transports may override this method to implement their own batch processing.
92
+ def batch_command(targets, command, options = {}, &callback)
93
+ assert_batch_size_one("batch_command()", targets)
94
+ target = targets.first
95
+ with_events(target, callback) do
96
+ @logger.debug("Running command '#{command}' on #{target.uri}")
97
+ run_command(target, command, filter_options(target, options))
98
+ end
99
+ end
100
+
101
+ # Runs the given script on a batch of nodes.
102
+ #
103
+ # The default implementation only supports batches of size 1 and will fail otherwise.
104
+ #
105
+ # Transports may override this method to implement their own batch processing.
106
+ def batch_script(targets, script, arguments, options = {}, &callback)
107
+ assert_batch_size_one("batch_script()", targets)
108
+ target = targets.first
109
+ with_events(target, callback) do
110
+ @logger.debug { "Running script '#{script}' on #{target.uri}" }
111
+ run_script(target, script, arguments, filter_options(target, options))
112
+ end
113
+ end
114
+
115
+ # Uploads the given source file to the destination location on a batch of nodes.
116
+ #
117
+ # The default implementation only supports batches of size 1 and will fail otherwise.
118
+ #
119
+ # Transports may override this method to implement their own batch processing.
120
+ def batch_upload(targets, source, destination, options = {}, &callback)
121
+ assert_batch_size_one("batch_upload()", targets)
122
+ target = targets.first
123
+ with_events(target, callback) do
124
+ @logger.debug { "Uploading: '#{source}' to #{destination} on #{target.uri}" }
125
+ upload(target, source, destination, filter_options(target, options))
126
+ end
127
+ end
128
+
129
+ # Split the given list of targets into a list of batches. The default
130
+ # implementation returns single-node batches.
131
+ #
132
+ # Transports may override this method, and the corresponding batch_*
133
+ # methods, to implement their own batch processing.
134
+ def batches(targets)
135
+ targets.map { |target| [target] }
136
+ end
137
+
138
+ # Transports should override this method with their own implementation of running a command.
139
+ def run_command(*_args)
140
+ raise NotImplementedError, "run_command() must be implemented by the transport class"
141
+ end
142
+
143
+ # Transports should override this method with their own implementation of running a script.
144
+ def run_script(*_args)
145
+ raise NotImplementedError, "run_script() must be implemented by the transport class"
146
+ end
147
+
148
+ # Transports should override this method with their own implementation of running a task.
149
+ def run_task(*_args)
150
+ raise NotImplementedError, "run_task() must be implemented by the transport class"
151
+ end
152
+
153
+ # Transports should override this method with their own implementation of file upload.
154
+ def upload(*_args)
155
+ raise NotImplementedError, "upload() must be implemented by the transport class"
156
+ end
157
+ end
158
+ end
159
+ end
@@ -0,0 +1,158 @@
1
+ require 'base64'
2
+ require 'concurrent'
3
+ require 'json'
4
+ require 'orchestrator_client'
5
+ require 'bolt/transport/base'
6
+ require 'bolt/result'
7
+
8
module Bolt
  module Transport
    # Transport that executes all work through an orchestrator service via
    # OrchestratorClient instead of connecting to each node directly.
    # Commands, scripts, and uploads are implemented by submitting a special
    # 'bolt' task to the orchestrator and unwrapping its task results.
    class Orch < Base
      CONF_FILE = File.expand_path('~/.puppetlabs/client-tools/orchestrator.conf')
      # Stand-in task used to run commands/scripts/uploads through the
      # orchestrator's task-running API.
      BOLT_MOCK_TASK = Struct.new(:name, :executable).new('bolt', 'bolt/tasks/init').freeze

      def initialize(config)
        super

        # Only these keys are forwarded to OrchestratorClient.
        client_keys = %i[service-url token-file cacert]
        @client_opts = config.select { |k, _v| client_keys.include?(k) }
      end

      def create_client
        OrchestratorClient.new(@client_opts, true)
      end

      # Builds the orchestrator task-run request body. The environment is
      # read from the first target; batches() groups targets by environment
      # so every target in a batch shares it.
      def build_request(targets, task, arguments)
        { task: task.name,
          environment: targets.first.options[:orch_task_environment],
          noop: arguments['_noop'],
          params: arguments.reject { |k, _| k == '_noop' },
          scope: {
            nodes: targets.map(&:host)
          } }
      end

      # Converts the orchestrator's per-node results into Bolt::Result
      # objects, one per target.
      def process_run_results(targets, results)
        targets_by_name = Hash[targets.map(&:host).zip(targets)]
        results.map do |node_result|
          target = targets_by_name[node_result['name']]
          state = node_result['state']
          result = node_result['result']

          # If it's finished or already has a proper error simply pass it
          # through to the result; otherwise make sure an error is generated
          if state == 'finished' || (result && result['_error'])
            Bolt::Result.new(target, value: result)
          elsif state == 'skipped'
            Bolt::Result.new(
              target,
              value: { '_error' => {
                'kind' => 'puppetlabs.tasks/skipped-node',
                'msg' => "Node #{target.host} was skipped",
                'details' => {}
              } }
            )
          else
            # Make a generic error with an unknown exit_code
            Bolt::Result.for_task(target, result.to_json, '', 'unknown')
          end
        end
      end

      # Runs a command on the targets via the mock 'bolt' task, then unwraps
      # each task result into a command result and publishes :node_result
      # events.
      def batch_command(targets, command, _options = {}, &callback)
        results = run_task_job(targets,
                               BOLT_MOCK_TASK,
                               action: 'command',
                               command: command,
                               &callback)
        callback ||= proc {}
        results.map! { |result| unwrap_bolt_result(result.target, result) }
        results.each do |result|
          callback.call(type: :node_result, result: result)
        end
      end

      # Runs a script on the targets via the mock 'bolt' task. The script
      # content is base64-encoded for transport.
      def batch_script(targets, script, arguments, _options = {}, &callback)
        content = File.open(script, &:read)
        content = Base64.encode64(content)
        params = {
          action: 'script',
          content: content,
          arguments: arguments
        }
        callback ||= proc {}
        results = run_task_job(targets, BOLT_MOCK_TASK, params, &callback)
        results.map! { |result| unwrap_bolt_result(result.target, result) }
        results.each do |result|
          callback.call(type: :node_result, result: result)
        end
      end

      # Uploads a file to the targets via the mock 'bolt' task, preserving
      # the source file's mode. Successful task results are rewritten as
      # upload results; failures are passed through unchanged.
      def batch_upload(targets, source, destination, _options = {}, &callback)
        content = File.open(source, &:read)
        content = Base64.encode64(content)
        mode = File.stat(source).mode
        params = {
          action: 'upload',
          path: destination,
          content: content,
          mode: mode
        }
        callback ||= proc {}
        results = run_task_job(targets, BOLT_MOCK_TASK, params, &callback)
        results.map! do |result|
          if result.error_hash
            result
          else
            Bolt::Result.for_upload(result.target, source, destination)
          end
        end
        results.each do |result|
          # callback is guaranteed non-nil by the ||= above, so no nil check
          # is needed here (matches the other batch_* methods).
          callback.call(type: :node_result, result: result)
        end
      end

      # Batches are grouped by task environment, since a single orchestrator
      # job runs in exactly one environment.
      def batches(targets)
        targets.group_by { |target| target.options[:orch_task_environment] }.values
      end

      # Submits one task run for the whole batch of targets. Yields a
      # :node_start event per target before submitting. Any client error is
      # converted into a failing result for every target in the batch.
      def run_task_job(targets, task, arguments)
        body = build_request(targets, task, arguments)

        targets.each do |target|
          yield(type: :node_start, target: target) if block_given?
        end

        begin
          results = create_client.run_task(body)

          process_run_results(targets, results)
        rescue StandardError => e
          targets.map do |target|
            Bolt::Result.from_exception(target, e)
          end
        end
      end

      def batch_task(targets, task, arguments, _options = {}, &callback)
        callback ||= proc {}
        results = run_task_job(targets, task, arguments, &callback)
        results.each do |result|
          callback.call(type: :node_result, result: result)
        end
      end

      # run_task generates a result that makes sense for a generic task which
      # needs to be unwrapped to extract stdout/stderr/exitcode.
      def unwrap_bolt_result(target, result)
        if result.error_hash
          # something went wrong return the failure
          return result
        end

        Bolt::Result.for_command(target, result.value['stdout'], result.value['stderr'], result.value['exit_code'])
      end
    end
  end
end