bolt 3.0.1 → 3.1.0



checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 16ea129b96b72ea751f0b59599b8783c7c2ad45ebc46004058384693d71795f1
4
- data.tar.gz: 9f227b4c8a2fbe06410090adb6590ddce9e8f4ae29a2fa10f0e2e464fa166c0a
3
+ metadata.gz: fb26f585ef72c98683208ddbed3f04cfeb20a035a252604bf73927354ccae4ba
4
+ data.tar.gz: ad2f911f3e960a4c8ae42586c68466f8526d454d8b227391ac8032614176baa7
5
5
  SHA512:
6
- metadata.gz: 8c6c1b341c1b8d83ec37184a2a9875e0665e4b5a1ed2a6b0c58266b38a541cafffb89df39da659a6ee6157a4140db852d43fa104b3ed7d99400011ec624139a7
7
- data.tar.gz: 7f9a485096374549f2d5f29ff00954c810016c751f54d551c9367212ccf889eced3b71a13e262ebd6916ad2568139298b87081412ef96d091aeddeffbb924684
6
+ metadata.gz: aa283cf489870b48c9b44098c6f4cd0b805ac963c86369481dae603997a3dd150025273ca9168a6c93538ee60acbcfca16fa524951a6e6dcc59a61212d682da9
7
+ data.tar.gz: cd7b62972011560ac21404e0589169f0ab690faaa7f17b8138bc09ad30637ac66fb1823f21714ea30c7526c637c7fec89e3d4fe237ad1ca753a6c1c7f46cc56c
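checksums.yaml records the SHA256 and SHA512 digests of the two archives packed inside the published gem. As a quick local check (a sketch, not part of this diff; the fetch and extract commands are just the standard gem layout), the digests of the extracted members can be compared against the values above:

  require 'digest'

  # Example workflow: `gem fetch bolt -v 3.1.0`, then `tar -xf bolt-3.1.0.gem`
  # to get the metadata.gz and data.tar.gz members referenced above.
  puts Digest::SHA256.file('data.tar.gz').hexdigest   # compare with the SHA256 entry above
  puts Digest::SHA512.file('metadata.gz').hexdigest   # compare with the SHA512 entry above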
data/Puppetfile CHANGED
@@ -23,6 +23,7 @@ mod 'puppetlabs-zone_core', '1.0.3'
23
23
 
24
24
  # Useful additional modules
25
25
  mod 'puppetlabs-package', '1.4.0'
26
+ mod 'puppetlabs-powershell_task_helper', '0.1.0'
26
27
  mod 'puppetlabs-puppet_conf', '0.8.0'
27
28
  mod 'puppetlabs-python_task_helper', '0.5.0'
28
29
  mod 'puppetlabs-reboot', '3.2.0'
@@ -45,3 +46,4 @@ mod 'puppetlabs-yaml', '0.2.0'
45
46
  mod 'canary', local: true
46
47
  mod 'aggregate', local: true
47
48
  mod 'puppetdb_fact', local: true
49
+ mod 'puppet_connect', local: true
data/lib/bolt/cli.rb CHANGED
@@ -899,7 +899,8 @@ module Bolt
899
899
  # Gem installs include the aggregate, canary, and puppetdb_fact modules, while
900
900
  # package installs include modules listed in the Bolt repo Puppetfile
901
901
  def incomplete_install?
902
- (Dir.children(Bolt::Config::Modulepath::MODULES_PATH) - %w[aggregate canary puppetdb_fact secure_env_vars]).empty?
902
+ builtin_module_list = %w[aggregate canary puppetdb_fact secure_env_vars puppet_connect]
903
+ (Dir.children(Bolt::Config::Modulepath::MODULES_PATH) - builtin_module_list).empty?
903
904
  end
904
905
 
905
906
  # Mimicks the output from Outputter::Human#fatal_error. This should be used to print
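The `incomplete_install?` guard treats an install as gem-only when the bundled modules directory contains nothing beyond the built-in list, which this release extends with puppet_connect. A standalone sketch of that array arithmetic (the directory listings below are illustrative, not taken from a real install):

  builtin_module_list = %w[aggregate canary puppetdb_fact secure_env_vars puppet_connect]

  gem_install_dirs     = %w[aggregate canary puppetdb_fact secure_env_vars puppet_connect]
  package_install_dirs = gem_install_dirs + %w[puppetlabs-package puppetlabs-reboot]

  (gem_install_dirs - builtin_module_list).empty?      # => true  -> incomplete_install? is true
  (package_install_dirs - builtin_module_list).empty?  # => false -> full package install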
data/lib/bolt/pal.rb CHANGED
@@ -215,6 +215,7 @@ module Bolt
215
215
  def with_bolt_executor(executor, inventory, pdb_client = nil, applicator = nil, &block)
216
216
  setup
217
217
  opts = {
218
+ bolt_project: @project,
218
219
  bolt_executor: executor,
219
220
  bolt_inventory: inventory,
220
221
  bolt_pdb_client: pdb_client,
data/lib/bolt/result.rb CHANGED
@@ -203,12 +203,17 @@ module Bolt
203
203
  end
204
204
 
205
205
  def to_data
206
+ serialized_value = safe_value
207
+ if serialized_value.key?('_sensitive') &&
208
+ serialized_value['_sensitive'].is_a?(Puppet::Pops::Types::PSensitiveType::Sensitive)
209
+ serialized_value['_sensitive'] = serialized_value['_sensitive'].to_s
210
+ end
206
211
  {
207
212
  "target" => @target.name,
208
213
  "action" => action,
209
214
  "object" => object,
210
215
  "status" => status,
211
- "value" => safe_value
216
+ "value" => serialized_value
212
217
  }
213
218
  end
214
219
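Result#to_data now stringifies a wrapped `_sensitive` value before building the result hash, so the output stays JSON-friendly instead of embedding a live Puppet Sensitive object (a bolt-server change further down this diff takes the opposite approach and unwraps it). A standalone sketch with a stand-in class; FakeSensitive is not Puppet's type, it only mimics the redacting to_s:

  class FakeSensitive
    def initialize(value)
      @value = value
    end

    def to_s
      'Sensitive [value redacted]'
    end
  end

  serialized_value = { '_sensitive' => FakeSensitive.new('hunter2') }
  serialized_value['_sensitive'] = serialized_value['_sensitive'].to_s
  serialized_value  # => { "_sensitive" => "Sensitive [value redacted]" }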
 
@@ -331,10 +331,15 @@ module Bolt
331
331
  # together multiple commands into a single sh invocation
332
332
  commands = [inject_interpreter(options[:interpreter], command)]
333
333
 
334
+ # Let the transport handle adding environment variables if it's custom.
334
335
  if options[:environment]
335
- env_decl = options[:environment].map do |env, val|
336
- "#{env}=#{Shellwords.shellescape(val)}"
337
- end.join(' ')
336
+ if defined? conn.add_env_vars
337
+ conn.add_env_vars(options[:environment])
338
+ else
339
+ env_decl = options[:environment].map do |env, val|
340
+ "#{env}=#{Shellwords.shellescape(val)}"
341
+ end.join(' ')
342
+ end
338
343
  end
339
344
 
340
345
  if escalate
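This hunk, apparently in the Bash shell implementation, lets a connection opt in to handling environment variables itself: `defined? conn.add_env_vars` evaluates to "method" when the connection implements the method (without calling it) and nil otherwise, in which case the shell falls back to building NAME=value assignments for the command line. A minimal sketch of that dispatch with hypothetical connection classes:

  class CustomConnection
    def add_env_vars(env_vars)
      @env_vars = env_vars   # e.g. later turned into `docker exec --env` flags
    end
  end

  class PlainConnection; end

  [CustomConnection.new, PlainConnection.new].each do |conn|
    if defined? conn.add_env_vars
      puts "#{conn.class}: connection handles env vars itself"
    else
      puts "#{conn.class}: shell prepends NAME=value assignments to the command"
    end
  end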
@@ -274,7 +274,12 @@ module Bolt
274
274
  []
275
275
  end
276
276
 
277
- output = execute([Snippets.shell_init, *env_assignments, command].join("\n"))
277
+ output = execute([
278
+ Snippets.shell_init,
279
+ Snippets.append_ps_module_path(dir),
280
+ *env_assignments,
281
+ command
282
+ ].join("\n"))
278
283
 
279
284
  Bolt::Result.for_task(target, output.stdout.string,
280
285
  output.stderr.string,
@@ -55,18 +55,45 @@ module Bolt
55
55
  }
56
56
  #{build_arg_list}
57
57
 
58
+ switch -regex ( Get-ExecutionPolicy )
59
+ {
60
+ '^AllSigned'
61
+ {
62
+ if ((Get-AuthenticodeSignature -File "#{script_path}").Status -ne 'Valid') {
63
+ $Host.UI.WriteErrorLine("Error: Target host Powershell ExecutionPolicy is set to ${_} and script '#{script_path}' does not contain a valid signature.")
64
+ exit 1;
65
+ }
66
+ }
67
+ '^Restricted'
68
+ {
69
+ $Host.UI.WriteErrorLine("Error: Target host Powershell ExecutionPolicy is set to ${_} which denies running any scripts on the target.")
70
+ exit 1;
71
+ }
72
+ }
73
+
74
+ if([string]::IsNullOrEmpty($invokeArgs.ScriptBlock)){
75
+ $Host.UI.WriteErrorLine("Error: Failed to obtain scriptblock from '#{script_path}'. Running scripts might be disabled on this system. For more information, see about_Execution_Policies at https:/go.microsoft.com/fwlink/?LinkID=135170");
76
+ exit 1;
77
+ }
78
+
58
79
  try
59
80
  {
60
81
  Invoke-Command @invokeArgs
61
82
  }
62
83
  catch
63
84
  {
64
- Write-Error $_.Exception
65
- exit 1
85
+ $Host.UI.WriteErrorLine("[$($_.FullyQualifiedErrorId)] Exception $($_.InvocationInfo.PositionMessage).`n$($_.Exception.Message)");
86
+ exit 1;
66
87
  }
67
88
  PS
68
89
  end
69
90
 
91
+ def append_ps_module_path(directory)
92
+ <<~PS
93
+ $env:PSModulePath += ";#{directory}"
94
+ PS
95
+ end
96
+
70
97
  def ps_task(path, arguments)
71
98
  <<~PS
72
99
  $private:tempArgs = Get-ContentAsJson (
@@ -138,7 +165,7 @@ module Bolt
138
165
  [Parameter(Mandatory = $true)] $Text,
139
166
  [Parameter(Mandatory = $false)] [Text.Encoding] $Encoding = [Text.Encoding]::UTF8
140
167
  )
141
-
168
+
142
169
  $Text | ConvertFrom-Json | ConvertFrom-PSCustomObject
143
170
  }
144
171
  PS
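Two things happen in these PowerShell snippet changes: the wrapper now fails early with clearer errors under Restricted or unsatisfied AllSigned execution policies, and a new `append_ps_module_path` snippet is emitted before a task runs, presumably so helper modules uploaded to the remote temp directory (such as the newly bundled puppetlabs-powershell_task_helper) can be imported by name. The snippet itself is a one-liner; a standalone reproduction of what it generates (the directory is an example):

  def append_ps_module_path(directory)
    <<~PS
      $env:PSModulePath += ";#{directory}"
    PS
  end

  print append_ps_module_path('C:/Users/bolt/AppData/Local/Temp/abc123')
  # => $env:PSModulePath += ";C:/Users/bolt/AppData/Local/Temp/abc123"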
@@ -74,15 +74,6 @@ module Bolt
74
74
  interpreters[Pathname(executable).extname] if interpreters
75
75
  end
76
76
 
77
- # Transform a parameter map to an environment variable map, with parameter names prefixed
78
- # with 'PT_' and values transformed to JSON unless they're strings.
79
- def envify_params(params)
80
- params.each_with_object({}) do |(k, v), h|
81
- v = v.to_json unless v.is_a?(String)
82
- h["PT_#{k}"] = v
83
- end
84
- end
85
-
86
77
  # Raises an error if more than one target was given in the batch.
87
78
  #
88
79
  # The default implementations of batch_* strictly assume the transport is
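`envify_params` is dropped from the transport base class; with the Docker transport below now delegating to the shared shell classes, transports no longer build the PT_-prefixed task environment themselves (the shell layer provides the same transformation). For reference, a standalone copy of what the removed helper computed:

  require 'json'

  # Same logic as the removed method, runnable on its own:
  def envify_params(params)
    params.each_with_object({}) do |(k, v), h|
      v = v.to_json unless v.is_a?(String)
      h["PT_#{k}"] = v
    end
  end

  envify_params('message' => 'hello', 'count' => 3, 'force' => true)
  # => { "PT_message" => "hello", "PT_count" => "3", "PT_force" => "true" }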
@@ -6,7 +6,7 @@ require 'bolt/transport/base'
6
6
 
7
7
  module Bolt
8
8
  module Transport
9
- class Docker < Base
9
+ class Docker < Simple
10
10
  def provided_features
11
11
  ['shell']
12
12
  end
@@ -16,130 +16,6 @@ module Bolt
16
16
  conn.connect
17
17
  yield conn
18
18
  end
19
-
20
- def upload(target, source, destination, _options = {})
21
- with_connection(target) do |conn|
22
- conn.with_remote_tmpdir do |dir|
23
- basename = File.basename(source)
24
- tmpfile = "#{dir}/#{basename}"
25
- if File.directory?(source)
26
- conn.write_remote_directory(source, tmpfile)
27
- else
28
- conn.write_remote_file(source, tmpfile)
29
- end
30
-
31
- _, stderr, exitcode = conn.execute('mv', tmpfile, destination, {})
32
- if exitcode != 0
33
- message = "Could not move temporary file '#{tmpfile}' to #{destination}: #{stderr}"
34
- raise Bolt::Node::FileError.new(message, 'MV_ERROR')
35
- end
36
- end
37
- Bolt::Result.for_upload(target, source, destination)
38
- end
39
- end
40
-
41
- def download(target, source, destination, _options = {})
42
- with_connection(target) do |conn|
43
- download = File.join(destination, Bolt::Util.unix_basename(source))
44
- conn.download_remote_content(source, destination)
45
- Bolt::Result.for_download(target, source, destination, download)
46
- end
47
- end
48
-
49
- def run_command(target, command, options = {}, position = [])
50
- execute_options = {}
51
- execute_options[:tty] = target.options['tty']
52
- execute_options[:environment] = options[:env_vars]
53
-
54
- if target.options['shell-command'] && !target.options['shell-command'].empty?
55
- # escape any double quotes in command
56
- command = command.gsub('"', '\"')
57
- command = "#{target.options['shell-command']} \" #{command}\""
58
- end
59
- with_connection(target) do |conn|
60
- stdout, stderr, exitcode = conn.execute(*Shellwords.split(command), execute_options)
61
- Bolt::Result.for_command(target,
62
- stdout,
63
- stderr,
64
- exitcode,
65
- 'command',
66
- command,
67
- position)
68
- end
69
- end
70
-
71
- def run_script(target, script, arguments, options = {}, position = [])
72
- # unpack any Sensitive data
73
- arguments = unwrap_sensitive_args(arguments)
74
- execute_options = {}
75
- execute_options[:environment] = options[:env_vars]
76
-
77
- with_connection(target) do |conn|
78
- conn.with_remote_tmpdir do |dir|
79
- remote_path = conn.write_remote_executable(dir, script)
80
- stdout, stderr, exitcode = conn.execute(remote_path, *arguments, execute_options)
81
- Bolt::Result.for_command(target,
82
- stdout,
83
- stderr,
84
- exitcode,
85
- 'script',
86
- script,
87
- position)
88
- end
89
- end
90
- end
91
-
92
- def run_task(target, task, arguments, _options = {}, position = [])
93
- implementation = task.select_implementation(target, provided_features)
94
- executable = implementation['path']
95
- input_method = implementation['input_method']
96
- extra_files = implementation['files']
97
- input_method ||= 'both'
98
-
99
- # unpack any Sensitive data
100
- arguments = unwrap_sensitive_args(arguments)
101
- with_connection(target) do |conn|
102
- execute_options = {}
103
- execute_options[:interpreter] = select_interpreter(executable, target.options['interpreters'])
104
- conn.with_remote_tmpdir do |dir|
105
- if extra_files.empty?
106
- task_dir = dir
107
- else
108
- # TODO: optimize upload of directories
109
- arguments['_installdir'] = dir
110
- task_dir = File.join(dir, task.tasks_dir)
111
- conn.mkdirs([task_dir] + extra_files.map { |file| File.join(dir, File.dirname(file['name'])) })
112
- extra_files.each do |file|
113
- conn.write_remote_file(file['path'], File.join(dir, file['name']))
114
- end
115
- end
116
-
117
- remote_task_path = conn.write_remote_executable(task_dir, executable)
118
-
119
- if Bolt::Task::STDIN_METHODS.include?(input_method)
120
- execute_options[:stdin] = StringIO.new(JSON.dump(arguments))
121
- end
122
-
123
- if Bolt::Task::ENVIRONMENT_METHODS.include?(input_method)
124
- execute_options[:environment] = envify_params(arguments)
125
- end
126
-
127
- stdout, stderr, exitcode = conn.execute(remote_task_path, execute_options)
128
- Bolt::Result.for_task(target,
129
- stdout,
130
- stderr,
131
- exitcode,
132
- task.name,
133
- position)
134
- end
135
- end
136
- end
137
-
138
- def connected?(target)
139
- with_connection(target) { true }
140
- rescue Bolt::Node::ConnectError
141
- false
142
- end
143
19
  end
144
20
  end
145
21
  end
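Switching the Docker transport from Base to Simple removes all of the per-action plumbing above: upload, download, run_command, run_script, run_task and connected? disappear from the transport, which now only has to yield a connection, with the shared Simple/Shell code presumably driving the actions. Judging from this diff, the connection contract looks roughly like the sketch below (a reading of the diff, not Bolt's documented interface):

  # Hypothetical skeleton of a Simple-style connection, inferred from this diff:
  class ExampleConnection
    def shell
      # e.g. Bolt::Shell::Bash.new(target, self) -- builds the actual commands
    end

    def execute(command)
      # run `command` on the target; the Docker version returns Open3.popen3 pipes
    end

    def upload_file(source, destination); end
    def download_file(source, destination, download); end

    # optional: take over env-var handling from the shell (see add_env_vars above)
    def add_env_vars(env_vars); end
  end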
@@ -5,227 +5,152 @@ require 'bolt/node/errors'
5
5
 
6
6
  module Bolt
7
7
  module Transport
8
- class Docker < Base
8
+ class Docker < Simple
9
9
  class Connection
10
+ attr_reader :user, :target
11
+
10
12
  def initialize(target)
11
13
  raise Bolt::ValidationError, "Target #{target.safe_name} does not have a host" unless target.host
12
14
  @target = target
15
+ @user = ENV['USER'] || Etc.getlogin
13
16
  @logger = Bolt::Logger.logger(target.safe_name)
14
- @docker_host = @target.options['service-url']
15
- @logger.trace("Initializing docker connection to #{@target.safe_name}")
17
+ @container_info = {}
18
+ @docker_host = target.options['service-url']
19
+ @logger.trace("Initializing docker connection to #{target.safe_name}")
20
+ end
21
+
22
+ def shell
23
+ @shell ||= if Bolt::Util.windows?
24
+ Bolt::Shell::Powershell.new(target, self)
25
+ else
26
+ Bolt::Shell::Bash.new(target, self)
27
+ end
28
+ end
29
+
30
+ # The full ID of the target container
31
+ #
32
+ # @return [String] The full ID of the target container
33
+ def container_id
34
+ @container_info["Id"]
16
35
  end
17
36
 
18
37
  def connect
19
38
  # We don't actually have a connection, but we do need to
20
39
  # check that the container exists and is running.
21
- output = execute_local_docker_json_command('ps')
22
- index = output.find_index { |item| item["ID"] == @target.host || item["Names"] == @target.host }
23
- raise "Could not find a container with name or ID matching '#{@target.host}'" if index.nil?
40
+ output = execute_local_json_command('ps')
41
+ index = output.find_index { |item| item["ID"] == target.host || item["Names"] == target.host }
42
+ raise "Could not find a container with name or ID matching '#{target.host}'" if index.nil?
24
43
  # Now find the indepth container information
25
- output = execute_local_docker_json_command('inspect', [output[index]["ID"]])
44
+ output = execute_local_json_command('inspect', [output[index]["ID"]])
26
45
  # Store the container information for later
27
46
  @container_info = output[0]
28
47
  @logger.trace { "Opened session" }
29
48
  true
30
49
  rescue StandardError => e
31
50
  raise Bolt::Node::ConnectError.new(
32
- "Failed to connect to #{@target.safe_name}: #{e.message}",
51
+ "Failed to connect to #{target.safe_name}: #{e.message}",
33
52
  'CONNECT_ERROR'
34
53
  )
35
54
  end
36
55
 
37
- # Executes a command inside the target container
38
- #
39
- # @param command [Array] The command to run, expressed as an array of strings
40
- # @param options [Hash] command specific options
41
- # @option opts [String] :interpreter statements that are prefixed to the command e.g `/bin/bash` or `cmd.exe /c`
42
- # @option opts [Hash] :environment A hash of environment variables that will be injected into the command
43
- # @option opts [IO] :stdin An IO object that will be used to redirect STDIN for the docker command
44
- def execute(*command, options)
45
- command.unshift(options[:interpreter]) if options[:interpreter]
46
- # Build the `--env` parameters
47
- envs = []
48
- if options[:environment]
49
- options[:environment].each { |env, val| envs.concat(['--env', "#{env}=#{val}"]) }
56
+ def add_env_vars(env_vars)
57
+ @env_vars = env_vars.each_with_object([]) do |env_var, acc|
58
+ acc << "--env"
59
+ acc << "#{env_var[0]}=#{env_var[1]}"
50
60
  end
61
+ end
51
62
 
52
- command_options = []
53
- # Need to be interactive if redirecting STDIN
54
- command_options << '--interactive' unless options[:stdin].nil?
55
- command_options << '--tty' if options[:tty]
56
- command_options.concat(envs) unless envs.empty?
57
- command_options << container_id
58
- command_options.concat(command)
59
-
60
- @logger.trace { "Executing: exec #{command_options}" }
63
+ # Executes a command inside the target container. This is called from the shell class.
64
+ #
65
+ # @param command [string] The command to run
66
+ def execute(command)
67
+ args = []
68
+ # CODEREVIEW: Is it always safe to pass --interactive?
69
+ args += %w[--interactive]
70
+ args += %w[--tty] if target.options['tty']
71
+ args += %W[--env DOCKER_HOST=#{@docker_host}] if @docker_host
72
+ args += @env_vars if @env_vars
73
+
74
+ if target.options['shell-command'] && !target.options['shell-command'].empty?
75
+ # escape any double quotes in command
76
+ command = command.gsub('"', '\"')
77
+ command = "#{target.options['shell-command']} \"#{command}\""
78
+ end
61
79
 
62
- stdout_str, stderr_str, status = execute_local_docker_command('exec', command_options, options[:stdin])
80
+ docker_command = %w[docker exec] + args + [container_id] + Shellwords.split(command)
81
+ @logger.trace { "Executing: #{docker_command.join(' ')}" }
63
82
 
64
- # The actual result is the exitstatus not the process object
65
- status = status.nil? ? -32768 : status.exitstatus
66
- if status == 0
67
- @logger.trace { "Command returned successfully" }
68
- else
69
- @logger.trace { "Command failed with exit code #{status}" }
70
- end
71
- stdout_str.force_encoding(Encoding::UTF_8)
72
- stderr_str.force_encoding(Encoding::UTF_8)
73
- # Normalise line endings
74
- stdout_str.gsub!("\r\n", "\n")
75
- stderr_str.gsub!("\r\n", "\n")
76
- [stdout_str, stderr_str, status]
83
+ Open3.popen3(*docker_command)
77
84
  rescue StandardError
78
85
  @logger.trace { "Command aborted" }
79
86
  raise
80
87
  end
81
88
 
82
- def write_remote_file(source, destination)
83
- @logger.trace { "Uploading #{source} to #{destination}" }
84
- _, stdout_str, status = execute_local_docker_command('cp', [source, "#{container_id}:#{destination}"])
85
- unless status.exitstatus.zero?
86
- raise "Error writing file to container #{@container_id}: #{stdout_str}"
87
- end
88
- rescue StandardError => e
89
- raise Bolt::Node::FileError.new(e.message, 'WRITE_ERROR')
90
- end
91
-
92
- def write_remote_directory(source, destination)
89
+ def upload_file(source, destination)
93
90
  @logger.trace { "Uploading #{source} to #{destination}" }
94
- _, stdout_str, status = execute_local_docker_command('cp', [source, "#{container_id}:#{destination}"])
91
+ _stdout, stderr, status = execute_local_command('cp', [source, "#{container_id}:#{destination}"])
95
92
  unless status.exitstatus.zero?
96
- raise "Error writing directory to container #{@container_id}: #{stdout_str}"
93
+ raise "Error writing to container #{container_id}: #{stderr}"
97
94
  end
98
95
  rescue StandardError => e
99
96
  raise Bolt::Node::FileError.new(e.message, 'WRITE_ERROR')
100
97
  end
101
98
 
102
- def download_remote_content(source, destination)
99
+ def download_file(source, destination, _download)
103
100
  @logger.trace { "Downloading #{source} to #{destination}" }
104
101
  # Create the destination directory, otherwise copying a source directory with Docker will
105
102
  # copy the *contents* of the directory.
106
103
  # https://docs.docker.com/engine/reference/commandline/cp/
107
104
  FileUtils.mkdir_p(destination)
108
- _, stdout_str, status = execute_local_docker_command('cp', ["#{container_id}:#{source}", destination])
105
+ _stdout, stderr, status = execute_local_command('cp', ["#{container_id}:#{source}", destination])
109
106
  unless status.exitstatus.zero?
110
- raise "Error downloading content from container #{@container_id}: #{stdout_str}"
107
+ raise "Error downloading content from container #{container_id}: #{stderr}"
111
108
  end
112
109
  rescue StandardError => e
113
110
  raise Bolt::Node::FileError.new(e.message, 'WRITE_ERROR')
114
111
  end
115
112
 
116
- def mkdirs(dirs)
117
- _, stderr, exitcode = execute('mkdir', '-p', *dirs, {})
118
- if exitcode != 0
119
- message = "Could not create directories: #{stderr}"
120
- raise Bolt::Node::FileError.new(message, 'MKDIR_ERROR')
121
- end
122
- end
123
-
124
- def make_tmpdir
125
- tmpdir = @target.options.fetch('tmpdir', container_tmpdir)
126
- tmppath = "#{tmpdir}/#{SecureRandom.uuid}"
127
-
128
- stdout, stderr, exitcode = execute('mkdir', '-m', '700', tmppath, {})
129
- if exitcode != 0
130
- raise Bolt::Node::FileError.new("Could not make tmpdir: #{stderr}", 'TMPDIR_ERROR')
131
- end
132
- tmppath || stdout.first
133
- end
134
-
135
- def with_remote_tmpdir
136
- dir = make_tmpdir
137
- yield dir
138
- ensure
139
- if dir
140
- if @target.options['cleanup']
141
- _, stderr, exitcode = execute('rm', '-rf', dir, {})
142
- if exitcode != 0
143
- Bolt::Logger.warn("fail_cleanup", "Failed to clean up tmpdir '#{dir}': #{stderr}")
144
- end
145
- else
146
- Bolt::Logger.warn("skip_cleanup", "Skipping cleanup of tmpdir '#{dir}'")
147
- end
148
- end
149
- end
113
+ # Executes a Docker CLI command. This is useful for running commands as
114
+ # part of this class without having to go through the `execute`
115
+ # function and manage pipes.
116
+ #
117
+ # @param subcommand [String] The docker subcommand to run
118
+ # e.g. 'inspect' for `docker inspect`
119
+ # @param arguments [Array] Arguments to pass to the docker command
120
+ # e.g. 'src' and 'dest' for `docker cp <src> <dest>
121
+ # @return [String, String, Process::Status] The output of the command: STDOUT, STDERR, Process Status
122
+ private def execute_local_command(subcommand, arguments = [])
123
+ # Set the DOCKER_HOST if we are using a non-default service-url
124
+ env_hash = @docker_host.nil? ? {} : { 'DOCKER_HOST' => @docker_host }
125
+ docker_command = [subcommand].concat(arguments)
150
126
 
151
- def write_remote_executable(dir, file, filename = nil)
152
- filename ||= File.basename(file)
153
- remote_path = File.join(dir.to_s, filename)
154
- write_remote_file(file, remote_path)
155
- make_executable(remote_path)
156
- remote_path
127
+ Open3.capture3(env_hash, 'docker', *docker_command, { binmode: true })
157
128
  end
158
129
 
159
- def make_executable(path)
160
- _, stderr, exitcode = execute('chmod', 'u+x', path, {})
161
- if exitcode != 0
162
- message = "Could not make file '#{path}' executable: #{stderr}"
163
- raise Bolt::Node::FileError.new(message, 'CHMOD_ERROR')
164
- end
130
+ # Executes a Docker CLI command and parses the output in JSON format
131
+ #
132
+ # @param subcommand [String] The docker subcommand to run
133
+ # e.g. 'inspect' for `docker inspect`
134
+ # @param arguments [Array] Arguments to pass to the docker command
135
+ # e.g. 'src' and 'dest' for `docker cp <src> <dest>
136
+ # @return [Object] Ruby object representation of the JSON string
137
+ private def execute_local_json_command(subcommand, arguments = [])
138
+ command_options = ['--format', '{{json .}}'].concat(arguments)
139
+ stdout, _stderr, _status = execute_local_command(subcommand, command_options)
140
+ extract_json(stdout)
165
141
  end
166
142
 
167
- private
168
-
169
143
  # Converts the JSON encoded STDOUT string from the docker cli into ruby objects
170
144
  #
171
145
  # @param stdout_string [String] The string to convert
172
146
  # @return [Object] Ruby object representation of the JSON string
173
- def extract_json(stdout_string)
147
+ private def extract_json(stdout)
174
148
  # The output from the docker format command is a JSON string per line.
175
149
  # We can't do a direct convert but this helper method will convert it into
176
150
  # an array of Objects
177
- stdout_string.split("\n")
178
- .reject { |str| str.strip.empty? }
179
- .map { |str| JSON.parse(str) }
180
- end
181
-
182
- # rubocop:disable Layout/LineLength
183
- # Executes a Docker CLI command
184
- #
185
- # @param subcommand [String] The docker subcommand to run e.g. 'inspect' for `docker inspect`
186
- # @param command_options [Array] Additional command options e.g. ['--size'] for `docker inspect --size`
187
- # @param redir_stdin [IO] IO object which will be use to as STDIN in the docker command. Default is nil, which does not perform redirection
188
- # @return [String, String, Process::Status] The output of the command: STDOUT, STDERR, Process Status
189
- # rubocop:enable Layout/LineLength
190
- def execute_local_docker_command(subcommand, command_options = [], redir_stdin = nil)
191
- env_hash = {}
192
- # Set the DOCKER_HOST if we are using a non-default service-url
193
- env_hash['DOCKER_HOST'] = @docker_host unless @docker_host.nil?
194
-
195
- command_options = [] if command_options.nil?
196
- docker_command = [subcommand].concat(command_options)
197
-
198
- # Always use binary mode for any text data
199
- capture_options = { binmode: true }
200
- capture_options[:stdin_data] = redir_stdin unless redir_stdin.nil?
201
- stdout_str, stderr_str, status = Open3.capture3(env_hash, 'docker', *docker_command, capture_options)
202
- [stdout_str, stderr_str, status]
203
- end
204
-
205
- # Executes a Docker CLI command and parses the output in JSON format
206
- #
207
- # @param subcommand [String] The docker subcommand to run e.g. 'inspect' for `docker inspect`
208
- # @param command_options [Array] Additional command options e.g. ['--size'] for `docker inspect --size`
209
- # @return [Object] Ruby object representation of the JSON string
210
- def execute_local_docker_json_command(subcommand, command_options = [])
211
- command_options = [] if command_options.nil?
212
- command_options = ['--format', '{{json .}}'].concat(command_options)
213
- stdout_str, _stderr_str, _status = execute_local_docker_command(subcommand, command_options)
214
- extract_json(stdout_str)
215
- end
216
-
217
- # The full ID of the target container
218
- #
219
- # @return [String] The full ID of the target container
220
- def container_id
221
- @container_info["Id"]
222
- end
223
-
224
- # The temp path inside the target container
225
- #
226
- # @return [String] The absolute path to the temp directory
227
- def container_tmpdir
228
- '/tmp'
151
+ stdout.split("\n")
152
+ .reject { |str| str.strip.empty? }
153
+ .map { |str| JSON.parse(str) }
229
154
  end
230
155
  end
231
156
  end
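The rewritten Connection#execute shells out to the Docker CLI once per command instead of managing pipes per action. Tracing the argument assembly above with example values (a walk-through, not code from Bolt):

  require 'shellwords'

  env_vars     = ['--env', 'PT_message=hi']   # set earlier via add_env_vars
  tty          = true                         # target option 'tty'
  container_id = 'f2d56ba2f290'               # example ID from `docker inspect`

  args  = %w[--interactive]
  args += %w[--tty] if tty
  args += env_vars

  docker_command = %w[docker exec] + args + [container_id] + Shellwords.split('whoami')
  # => ["docker", "exec", "--interactive", "--tty", "--env", "PT_message=hi",
  #     "f2d56ba2f290", "whoami"]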
data/lib/bolt/version.rb CHANGED
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module Bolt
4
- VERSION = '3.0.1'
4
+ VERSION = '3.1.0'
5
5
  end
@@ -133,7 +133,14 @@ module BoltServer
133
133
  task_data = body['task']
134
134
  task = Bolt::Task::PuppetServer.new(task_data['name'], task_data['metadata'], task_data['files'], @file_cache)
135
135
  parameters = body['parameters'] || {}
136
- [@executor.run_task(target, task, parameters), nil]
136
+ task_result = @executor.run_task(target, task, parameters)
137
+ task_result.each do |result|
138
+ value = result.value
139
+ next unless value.is_a?(Hash)
140
+ next unless value.key?('_sensitive')
141
+ value['_sensitive'] = value['_sensitive'].unwrap
142
+ end
143
+ [task_result, nil]
137
144
  end
138
145
 
139
146
  def run_command(target, body)
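In contrast to the Result#to_data change earlier in this diff, which redacts, this bolt-server endpoint unwraps `_sensitive` task values so the API consumer receives the plaintext. A minimal sketch of that unwrap step (Sensitiveish is a hypothetical stand-in, not Puppet's wrapper class):

  Sensitiveish = Struct.new(:secret) do
    def unwrap
      secret
    end
  end

  value = { '_sensitive' => Sensitiveish.new('hunter2') }
  value['_sensitive'] = value['_sensitive'].unwrap if value.key?('_sensitive')
  value  # => { "_sensitive" => "hunter2" }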
@@ -275,15 +282,19 @@ module BoltServer
275
282
  Bolt::Config.from_project(project, { log: { 'bolt-debug.log' => 'disable' } })
276
283
  end
277
284
 
285
+ def pal_from_project_bolt_config(bolt_config)
286
+ modulepath_object = Bolt::Config::Modulepath.new(
287
+ bolt_config.modulepath,
288
+ boltlib_path: [PE_BOLTLIB_PATH, Bolt::Config::Modulepath::BOLTLIB_PATH],
289
+ builtin_content_path: @config['builtin-content-dir']
290
+ )
291
+ Bolt::PAL.new(modulepath_object, nil, nil, nil, nil, nil, bolt_config.project)
292
+ end
293
+
278
294
  def in_bolt_project(versioned_project)
279
295
  @pal_mutex.synchronize do
280
296
  bolt_config = config_from_project(versioned_project)
281
- modulepath_object = Bolt::Config::Modulepath.new(
282
- bolt_config.modulepath,
283
- boltlib_path: [PE_BOLTLIB_PATH, Bolt::Config::Modulepath::BOLTLIB_PATH],
284
- builtin_content_path: @config['builtin-content-dir']
285
- )
286
- pal = Bolt::PAL.new(modulepath_object, nil, nil, nil, nil, nil, bolt_config.project)
297
+ pal = pal_from_project_bolt_config(bolt_config)
287
298
  context = {
288
299
  pal: pal,
289
300
  config: bolt_config
@@ -351,8 +362,8 @@ module BoltServer
351
362
  }
352
363
  end
353
364
 
354
- def allowed_helper(metadata, allowlist)
355
- allowed = allowlist.nil? || allowlist.include?(metadata['name'])
365
+ def allowed_helper(pal, metadata, allowlist)
366
+ allowed = !pal.filter_content([metadata['name']], allowlist).empty?
356
367
  metadata.merge({ 'allowed' => allowed })
357
368
  end
358
369
 
@@ -366,21 +377,27 @@ module BoltServer
366
377
  plans.map { |plan_name| { 'name' => plan_name } }
367
378
  end
368
379
 
369
- def file_metadatas(pal, module_name, file)
370
- pal.in_bolt_compiler do
371
- mod = Puppet.lookup(:current_environment).module(module_name)
372
- raise ArgumentError, "`module_name`: #{module_name} does not exist" unless mod
373
- abs_file_path = mod.file(file)
374
- raise ArgumentError, "`file`: #{file} does not exist inside the module's 'files' directory" unless abs_file_path
375
- fileset = Puppet::FileServing::Fileset.new(abs_file_path, 'recurse' => 'yes')
376
- Puppet::FileServing::Fileset.merge(fileset).collect do |relative_file_path, base_path|
377
- metadata = Puppet::FileServing::Metadata.new(base_path, relative_path: relative_file_path)
378
- metadata.checksum_type = 'sha256'
379
- metadata.links = 'follow'
380
- metadata.collect
381
- metadata.to_data_hash
380
+ def file_metadatas(versioned_project, module_name, file)
381
+ abs_file_path = @pal_mutex.synchronize do
382
+ bolt_config = config_from_project(versioned_project)
383
+ pal = pal_from_project_bolt_config(bolt_config)
384
+ pal.in_bolt_compiler do
385
+ mod = Puppet.lookup(:current_environment).module(module_name)
386
+ raise ArgumentError, "`module_name`: #{module_name} does not exist" unless mod
387
+ mod.file(file)
382
388
  end
383
389
  end
390
+
391
+ raise ArgumentError, "`file`: #{file} does not exist inside the module's 'files' directory" unless abs_file_path
392
+
393
+ fileset = Puppet::FileServing::Fileset.new(abs_file_path, 'recurse' => 'yes')
394
+ Puppet::FileServing::Fileset.merge(fileset).collect do |relative_file_path, base_path|
395
+ metadata = Puppet::FileServing::Metadata.new(base_path, relative_path: relative_file_path)
396
+ metadata.checksum_type = 'sha256'
397
+ metadata.links = 'follow'
398
+ metadata.collect
399
+ metadata.to_data_hash
400
+ end
384
401
  end
385
402
 
386
403
  get '/' do
@@ -520,7 +537,7 @@ module BoltServer
520
537
  return MISSING_VERSIONED_PROJECT_RESPONSE if params['versioned_project'].nil?
521
538
  in_bolt_project(params['versioned_project']) do |context|
522
539
  plan_info = pe_plan_info(context[:pal], params[:module_name], params[:plan_name])
523
- plan_info = allowed_helper(plan_info, context[:config].project.plans)
540
+ plan_info = allowed_helper(context[:pal], plan_info, context[:config].project.plans)
524
541
  [200, plan_info.to_json]
525
542
  end
526
543
  rescue Bolt::Error => e
@@ -550,7 +567,7 @@ module BoltServer
550
567
  'versioned_project' => params['versioned_project']
551
568
  }
552
569
  task_info = pe_task_info(context[:pal], params[:module_name], params[:task_name], ps_parameters)
553
- task_info = allowed_helper(task_info, context[:config].project.tasks)
570
+ task_info = allowed_helper(context[:pal], task_info, context[:config].project.tasks)
554
571
  [200, task_info.to_json]
555
572
  end
556
573
  rescue Bolt::Error => e
@@ -590,7 +607,7 @@ module BoltServer
590
607
  plans_response = plan_list(context[:pal])
591
608
 
592
609
  # Dig in context for the allowlist of plans from project object
593
- plans_response.map! { |metadata| allowed_helper(metadata, context[:config].project.plans) }
610
+ plans_response.map! { |metadata| allowed_helper(context[:pal], metadata, context[:config].project.plans) }
594
611
 
595
612
  # We structure this array of plans to be an array of hashes so that it matches the structure
596
613
  # returned by the puppetserver API that serves data like this. Structuring the output this way
@@ -626,7 +643,7 @@ module BoltServer
626
643
  tasks_response = task_list(context[:pal])
627
644
 
628
645
  # Dig in context for the allowlist of tasks from project object
629
- tasks_response.map! { |metadata| allowed_helper(metadata, context[:config].project.tasks) }
646
+ tasks_response.map! { |metadata| allowed_helper(context[:pal], metadata, context[:config].project.tasks) }
630
647
 
631
648
  # We structure this array of tasks to be an array of hashes so that it matches the structure
632
649
  # returned by the puppetserver API that serves data like this. Structuring the output this way
@@ -642,12 +659,11 @@ module BoltServer
642
659
  #
643
660
  # @param versioned_project [String] the versioned_project to fetch the file metadatas from
644
661
  get '/project_file_metadatas/:module_name/*' do
645
- return MISSING_VERSIONED_PROJECT_RESPONSE if params['versioned_project'].nil?
646
- in_bolt_project(params['versioned_project']) do |context|
647
- file = params[:splat].first
648
- metadatas = file_metadatas(context[:pal], params[:module_name], file)
649
- [200, metadatas.to_json]
650
- end
662
+ versioned_project = params['versioned_project']
663
+ return MISSING_VERSIONED_PROJECT_RESPONSE if versioned_project.nil?
664
+ file = params[:splat].first
665
+ metadatas = file_metadatas(versioned_project, params[:module_name], file)
666
+ [200, metadatas.to_json]
651
667
  rescue Bolt::Error => e
652
668
  [400, e.to_json]
653
669
  rescue ArgumentError => e
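The endpoint now resolves the file path inside the PAL mutex but walks the fileset outside of it, and the wildcard route still determines which file to describe. For readers unfamiliar with Sinatra's splat routes, a tiny self-contained sketch (not Bolt's app; the module, file, and project values are examples) of how the pieces of the URL land in params:

  require 'sinatra'

  get '/project_file_metadatas/:module_name/*' do
    # GET /project_file_metadatas/mymod/files/install.sh?versioned_project=myproj_abc123
    module_name       = params[:module_name]        # => "mymod"
    file              = params[:splat].first        # => "files/install.sh"
    versioned_project = params['versioned_project'] # => "myproj_abc123"
    "#{module_name} #{file} #{versioned_project}"
  end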
@@ -130,11 +130,16 @@ module BoltSpec
130
130
  @executor ||= BoltSpec::Plans::MockExecutor.new(modulepath)
131
131
  end
132
132
 
133
- # Override in your tests
133
+ # Overrides inventory for tests.
134
134
  def inventory_data
135
135
  {}
136
136
  end
137
137
 
138
+ # Overrides configuration for tests.
139
+ def config_data
140
+ {}
141
+ end
142
+
138
143
  def inventory
139
144
  @inventory ||= Bolt::Inventory.create_version(inventory_data, config.transport, config.transports, plugins)
140
145
  end
@@ -142,7 +147,7 @@ module BoltSpec
142
147
  # Override in your tests
143
148
  def config
144
149
  @config ||= begin
145
- conf = Bolt::Config.default
150
+ conf = Bolt::Config.new(Bolt::Project.default_project, config_data)
146
151
  conf.modulepath = [modulepath].flatten
147
152
  conf
148
153
  end
@@ -161,7 +166,7 @@ module BoltSpec
161
166
  BoltSpec::Plans::MOCKED_ACTIONS.each do |action|
162
167
  # Allowed action stubs can be called up to be_called_times number of times
163
168
  define_method :"allow_#{action}" do |object|
164
- executor.send(:"stub_#{action}", object).add_stub
169
+ executor.send(:"stub_#{action}", object).add_stub(inventory)
165
170
  end
166
171
 
167
172
  # Expected action stubs must be called exactly the expected number of times
@@ -172,7 +177,7 @@ module BoltSpec
172
177
 
173
178
  # This stub will catch any action call if there are no stubs specifically for that task
174
179
  define_method :"allow_any_#{action}" do
175
- executor.send(:"stub_#{action}", :default).add_stub
180
+ executor.send(:"stub_#{action}", :default).add_stub(inventory)
176
181
  end
177
182
  end
178
183
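BoltContext gains a `config_data` hook mirroring `inventory_data`, and the default config is now built from a project plus that hash instead of Bolt::Config.default. In a plan spec it can be overridden the same way as the inventory; a hypothetical RSpec sketch (the plan name, task name, and settings are examples, and BoltSpec::Plans.init is assumed to have run in spec_helper):

  require 'bolt_spec/plans'

  describe 'mymodule::deploy' do
    include BoltSpec::Plans

    # Hypothetical overrides picked up by the helpers above
    def config_data
      { 'modulepath' => ['spec/fixtures/modules'] }
    end

    def inventory_data
      { 'targets' => ['web1.example.com'] }
    end

    it 'runs the configure task' do
      expect_task('mymodule::configure').always_return('status' => 'done')
      result = run_plan('mymodule::deploy', 'targets' => 'all')
      expect(result.status).to eq('success')
    end
  end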
 
@@ -5,119 +5,11 @@ require 'bolt_spec/plans/mock_executor'
5
5
  require 'bolt/pal'
6
6
 
7
7
  # These helpers are intended to be used for plan unit testing without calling
8
- # out to target nodes. It uses the BoltContext helper to set up a mock executor
8
+ # out to targets. It uses the BoltContext helper to set up a mock executor
9
9
  # which allows calls to run_* functions to be stubbed for testing. The context
10
10
  # helper also loads Bolt datatypes and plan functions to be used by the code
11
11
  # being tested.
12
12
  #
13
- # Stub matching
14
- #
15
- # Stubs match invocations of run_* functions by default matching any call but
16
- # with_targets and with_params helpers can further restrict the stub to match
17
- # more exact invocations. It's possible a call to run_* could match multiple
18
- # stubs. In this case the mock executor will first check for stubs specifically
19
- # matching the task being run after which it will use the last stub that
20
- # matched
21
- #
22
- #
23
- # allow vs expect
24
- #
25
- # Stubs have two general modes bases on whether the test is making assertions
26
- # on whether function was called. Allow stubs allow the run_* invocation to
27
- # be called any number of times while expect stubs will fail if no run_*
28
- # invocation matches them. The be_called_times(n) stub method can be used to
29
- # ensure an allow stub is not called more than n times or that an expect stub
30
- # is called exactly n times.
31
- #
32
- # Configuration
33
- #
34
- # To configure Puppet and Bolt at the beginning of tests, add the following
35
- # line to your spec_helper.rb:
36
- #
37
- # BoltSpec::Plans.init
38
- #
39
- # By default the plan helpers use the modulepath set up for rspec-puppet and
40
- # an otherwise empty bolt config and inventory. To create your own values for
41
- # these override the modulepath, config, or inventory methods.
42
- #
43
- # Sub-plan Execution
44
- #
45
- # When testing a plan, often times those plans call other plans in order to
46
- # build complex workflows. To support this we offer running in two different
47
- # modes:
48
- # execute_any_plan (default) - This mode will execute any plan that is encountered
49
- # without having to be stubbed/mocked. This default mode allows for plan control
50
- # flow to behave as normal. If you choose to stub/mock out a sub-plan in this mode
51
- # that will be honored and the sub-plan will not be executed. We will use the modifiers
52
- # on the stub to check for the conditions specified (example: be_called_times(3))
53
- #
54
- # execute_no_plan - This mode will not execute a plans that it encounters. Instead, when
55
- # a plan is encountered it will throw an error unless the plan is mocked out. This
56
- # mode is useful for ensuring that there are no plans called that you do not expect.
57
- # This plan requires authors to mock out all sub-plans that may be invoked when running
58
- # tests.
59
- #
60
- # TODO:
61
- # - Allow description based stub matching
62
- # - Better testing of plan errors
63
- # - Better error collection around call counts. Show what stubs exists and more than a single failure
64
- # - Allow stubbing with a block(at the double level? As a matched stub?)
65
- # - package code so that it can be used for testing modules outside of this repo
66
- # - set subject from describe and provide matchers similar to rspec puppets function tests
67
- # - Allow specific plans to be executed when running in execute_no_plan mode.
68
- #
69
- # MAYBE TODO?:
70
- # - validate call expectations at the end of the example instead of in run_plan
71
- # - resultset matchers to help testing canary like plans?
72
- # - inventory matchers to help testing plans that change inventory
73
- #
74
- # Flags:
75
- # - execute_any_plan: execute any plan that is encountered unless it is mocked (default)
76
- # - execute_no_plan: throw an error if a plan is encountered that is not stubbed
77
- #
78
- # Stubs:
79
- # - allow_command(cmd), expect_command(cmd): expect the exact command
80
- # - allow_plan(plan), expect_plan(plan): expect the named plan
81
- # - allow_script(script), expect_script(script): expect the script as <module>/path/to/file
82
- # - allow_task(task), expect_task(task): expect the named task
83
- # - allow_download(file), expect_download(file): expect the identified source file
84
- # - allow_upload(file), expect_upload(file): expect the identified source file
85
- # - allow_apply_prep: allows `apply_prep` to be invoked in the plan but does not allow modifiers
86
- # - allow_apply: allows `apply` to be invoked in the plan but does not allow modifiers
87
- # - allow_out_message, expect_out_message: expect a message to be passed to out::message (only modifiers are
88
- # be_called_times(n), with_params(params), and not_be_called)
89
- #
90
- # Stub modifiers:
91
- # - be_called_times(n): if allowed, fail if the action is called more than 'n' times
92
- # if expected, fail unless the action is called 'n' times
93
- # - not_be_called: fail if the action is called
94
- # - with_targets(targets): target or list of targets that you expect to be passed to the action
95
- # plan: does not support this modifier
96
- # - with_params(params): list of params and metaparams (or options) that you expect to be passed to the action.
97
- # Corresponds to the action's last argument.
98
- # - with_destination(dest): for upload_file and download_file, the expected destination path
99
- # - always_return(value): return a Bolt::ResultSet of Bolt::Result objects with the specified value Hash
100
- # plan: returns a Bolt::PlanResult with the specified value with a status of 'success'
101
- # command and script: only accept 'stdout' and 'stderr' keys
102
- # upload: does not support this modifier
103
- # download: does not support this modifier
104
- # - return_for_targets(targets_to_values): return a Bolt::ResultSet of Bolt::Result objects from the Hash mapping
105
- # targets to their value Hashes
106
- # command and script: only accept 'stdout' and 'stderr' keys
107
- # upload: does not support this modifier
108
- # download: does not support this modifier
109
- # plan: does not support this modifier
110
- # - return(&block): invoke the block to construct a Bolt::ResultSet. The blocks parameters differ based on action
111
- # command: `{ |targets:, command:, params:| ... }`
112
- # plan: `{ |plan:, params:| ... }`
113
- # script: `{ |targets:, script:, params:| ... }`
114
- # task: `{ |targets:, task:, params:| ... }`
115
- # upload: `{ |targets:, source:, destination:, params:| ... }`
116
- # download: `{ |targets:, source:, destination:, params:| ... }`
117
- # - error_with(err): return a failing Bolt::ResultSet, with Bolt::Result objects with the identified err hash
118
- # plans will throw a Bolt::PlanFailure that will be returned as the value of
119
- # the Bolt::PlanResult object with a status of 'failure'.
120
- #
121
13
  # Example:
122
14
  # describe "my_plan" do
123
15
  # it 'should return' do
@@ -14,14 +14,50 @@
14
14
  plan puppet_connect::test_input_data(TargetSpec $targets = 'all') {
15
15
  $targs = get_targets($targets)
16
16
  $targs.each |$target| {
17
- if $target.transport != 'ssh' and $target.transport != 'winrm' {
18
- fail_plan("Inventory contains target ${target} with unsupported transport, must be ssh or winrm")
19
- }
20
- if $target.transport == 'ssh' {
21
- # Disable SSH autoloading to prevent false positive results
22
- # (input data is wrong but target is still connectable due
23
- # to autoloaded config)
24
- set_config($target, ['ssh', 'load-config'], false)
17
+ case $target.transport {
18
+ 'ssh': {
19
+ $private_key_config = dig($target.config, 'ssh', 'private-key')
20
+ if $private_key_config =~ String {
21
+ $msg = @("END")
22
+ The SSH private key of the ${$target} target points to a filepath on disk,
23
+ which is not allowed in Puppet Connect. Instead, the private key contents must
24
+ be specified and this should be done via the PuppetConnectData plugin. Below is
25
+ an example of a Puppet Connect-compatible specification of the private-key. First,
26
+ we start with the inventory file:
27
+ ...
28
+ private-key:
29
+ _plugin: puppet_connect_data
30
+ key: ssh_private_key
31
+ ...
32
+
33
+ Next is the corresponding entry in the input data file:
34
+ ...
35
+ ssh_private_key:
36
+ key-data:
37
+ <private_key_contents>
38
+ ...
39
+ | END
40
+
41
+ out::message($msg)
42
+ fail_plan("The SSH private key of the ${$target} target points to a filepath on disk")
43
+ }
44
+
45
+ # Disable SSH autoloading to prevent false positive results
46
+ # (input data is wrong but target is still connectable due
47
+ # to autoloaded config)
48
+ set_config($target, ['ssh', 'load-config'], false)
49
+ # Maintain configuration parity with Puppet Connect to improve
50
+ # the reliability of our test
51
+ set_config($target, ['ssh', 'host-key-check'], false)
52
+ }
53
+ 'winrm': {
54
+ # Maintain configuration parity with Puppet Connect
55
+ set_config($target, ['winrm', 'ssl'], false)
56
+ set_config($target, ['winrm', 'ssl-verify'], false)
57
+ }
58
+ default: {
59
+ fail_plan("Inventory contains target ${target} with unsupported transport, must be ssh or winrm")
60
+ }
25
61
  }
26
62
  }
27
63
  # The SSH/WinRM transports will report an 'unknown host' error for targets where
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: bolt
3
3
  version: !ruby/object:Gem::Version
4
- version: 3.0.1
4
+ version: 3.1.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Puppet
8
8
  autorequire:
9
9
  bindir: exe
10
10
  cert_chain: []
11
- date: 2021-02-16 00:00:00.000000000 Z
11
+ date: 2021-03-01 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: addressable