foreman_remote_execution_core 1.4.8 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 465e9c409286d4bd5501a4e69ce110c449ef4ab4288ef401276231e3498feaad
- data.tar.gz: 73bc72cbac7038f7356499f638e517f00ffb9396eb36c63c5751fb985958cd22
+ metadata.gz: f9959aa1c94136ee271de608d9f9767f46e0c6d628f8a043420dfac7e54177c8
+ data.tar.gz: 7a961ebb1cf1b044e005f43b3ce727b9966fbcf7c17bfcd21307a20db408b662
  SHA512:
- metadata.gz: d7bd2eab26b331d80adb66481fa1e3d34b698fbdfab781000216f93e9c627b36f46ba8c614c8c6f286b92c6cd39db59448d524fb2ea159c4535e2a40fa44b786
- data.tar.gz: 621b23721f0346fb4dcf9b70133174f7bf7c1c24d917f7f3bc8f0f8595d9814ff1a977888927e370b32c4674bf62313c91d48850b5523e18a6c5a65c98db5a94
+ metadata.gz: d3d3cbc724bf1d38f8d9e9a2ef0be35fa604ee8482ec7036a3b6d23cd97b86fb405ff7de4bc9b2c988b8036c6b7ef14d9dcbb2fec8899d14c1668620be8b50fe
+ data.tar.gz: 5abf6e164e855b066efdd70bf7b0770acc62ed76aa3c742a28f6b546f79f4042e9e98d4c4f59da06b96bcb66061f124f8a4e7e4b2575057c393dc8ae9a3ea707
lib/foreman_remote_execution_core/actions.rb CHANGED
@@ -1,20 +1,6 @@
- require 'foreman_tasks_core/shareable_action'
-
  module ForemanRemoteExecutionCore
  module Actions
- class RunScript < ForemanTasksCore::Runner::Action
- def initiate_runner
- additional_options = {
- :step_id => run_step_id,
- :uuid => execution_plan_id,
- }
- ForemanRemoteExecutionCore.runner_class.build(input.merge(additional_options),
- suspended_action: suspended_action)
- end
-
- def runner_dispatcher
- ForemanRemoteExecutionCore::Dispatcher.instance
- end
- end
+ require 'smart_proxy_remote_execution_ssh/actions/run_script'
+ RunScript = Proxy::RemoteExecution::Ssh::Actions::RunScript
  end
  end
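
The hunk above is the heart of the 1.4.8 → 1.5.0 change: the gem stops defining its own Dynflow action and instead aliases RunScript to the action shipped in smart_proxy_remote_execution_ssh. A minimal sketch of what the alias means for calling code, assuming both gems and the smart-proxy environment they expect are loadable (this snippet is illustrative and not part of either gem):

    # Illustrative only: with the alias in place, the legacy constant and the
    # proxy plugin's action resolve to the very same class object.
    require 'foreman_remote_execution_core'

    legacy  = ForemanRemoteExecutionCore::Actions::RunScript
    current = Proxy::RemoteExecution::Ssh::Actions::RunScript
    puts legacy.equal?(current)   # => true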
lib/foreman_remote_execution_core/version.rb CHANGED
@@ -1,3 +1,3 @@
  module ForemanRemoteExecutionCore
- VERSION = '1.4.8'.freeze
+ VERSION = '1.5.0'.freeze
  end
lib/foreman_remote_execution_core.rb CHANGED
@@ -1,90 +1,8 @@
- require 'foreman_tasks_core'
-
  module ForemanRemoteExecutionCore
- extend ForemanTasksCore::SettingsLoader
- register_settings([:remote_execution_ssh, :smart_proxy_remote_execution_ssh_core],
- :ssh_identity_key_file => '~/.ssh/id_rsa_foreman_proxy',
- :ssh_user => 'root',
- :remote_working_dir => '/var/tmp',
- :local_working_dir => '/var/tmp',
- :kerberos_auth => false,
- :async_ssh => false,
- # When set to nil, makes REX use the runner's default interval
- :runner_refresh_interval => nil,
- :ssh_log_level => :fatal,
- :cleanup_working_dirs => true)
-
- SSH_LOG_LEVELS = %w(debug info warn error fatal).freeze
-
- def self.simulate?
- %w(yes true 1).include? ENV.fetch('REX_SIMULATE', '').downcase
- end
-
- def self.validate_settings!
- super
- self.validate_ssh_log_level!
- @settings[:ssh_log_level] = @settings[:ssh_log_level].to_sym
- end
-
- def self.validate_ssh_log_level!
- wanted_level = @settings[:ssh_log_level].to_s
- unless SSH_LOG_LEVELS.include? wanted_level
- raise "Wrong value '#{@settings[:ssh_log_level]}' for ssh_log_level, must be one of #{SSH_LOG_LEVELS.join(', ')}"
- end
-
- current = if defined?(::Proxy::SETTINGS)
- ::Proxy::SETTINGS.log_level.to_s.downcase
- elsif defined?(SmartProxyDynflowCore::SETTINGS)
- SmartProxyDynflowCore::SETTINGS.log_level.to_s.downcase
- else
- Rails.configuration.log_level.to_s
- end
+ require 'smart_proxy_remote_execution_ssh'
+ require 'foreman_remote_execution_core/actions'

- # regular log levels correspond to upcased ssh logger levels
- ssh, regular = [wanted_level, current].map do |wanted|
- SSH_LOG_LEVELS.each_with_index.find { |value, _index| value == wanted }.last
- end
-
- if ssh < regular
- raise 'ssh_log_level cannot be more verbose than regular log level'
- end
- end
-
- def self.runner_class
- @runner_class ||= if simulate?
- FakeScriptRunner
- elsif settings[:async_ssh]
- PollingScriptRunner
- else
- ScriptRunner
- end
- end
-
- if ForemanTasksCore.dynflow_present?
- require 'foreman_tasks_core/runner'
- require 'foreman_remote_execution_core/log_filter'
- require 'foreman_remote_execution_core/utils'
- if simulate?
- # Load the fake implementation of the script runner if debug is enabled
- require 'foreman_remote_execution_core/fake_script_runner'
- else
- require 'foreman_remote_execution_core/script_runner'
- require 'foreman_remote_execution_core/polling_script_runner'
- end
- require 'foreman_remote_execution_core/dispatcher'
- require 'foreman_remote_execution_core/actions'
-
- # rubocop:disable Lint/SuppressedException
- begin
- require 'smart_proxy_dynflow_core/task_launcher_registry'
- rescue LoadError
- end
- # rubocop:enable Lint/SuppressedException
-
- if defined?(::SmartProxyDynflowCore)
- SmartProxyDynflowCore::TaskLauncherRegistry.register('ssh', ForemanTasksCore::TaskLauncher::Batch)
- end
+ def self.settings
+ Proxy::RemoteExecution::Ssh::Plugin.settings
  end
-
- require 'foreman_remote_execution_core/version'
  end
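
In 1.4.8 this file registered its own defaults through ForemanTasksCore::SettingsLoader; in 1.5.0 it only exposes whatever the smart_proxy_remote_execution_ssh plugin is configured with. A hedged sketch of reading a setting through the new delegation (the key name comes from the old defaults above; how the proxy's settings object is indexed depends on smart-proxy, so treat the access style as an assumption):

    # Illustrative only; assumes a smart proxy with the
    # smart_proxy_remote_execution_ssh plugin loaded and configured.
    require 'foreman_remote_execution_core'

    settings = ForemanRemoteExecutionCore.settings  # Proxy::RemoteExecution::Ssh::Plugin.settings
    # Access style depends on the proxy's settings object (often OpenStruct-like):
    puts settings[:ssh_identity_key_file]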
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: foreman_remote_execution_core
  version: !ruby/object:Gem::Version
- version: 1.4.8
+ version: 1.5.0
  platform: ruby
  authors:
  - Ivan Nečas
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2021-08-17 00:00:00.000000000 Z
+ date: 2021-06-07 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: bcrypt_pbkdf
@@ -58,14 +58,28 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: 4.2.0
+ version: '0'
+ type: :runtime
+ prerelease: false
+ version_requirements: !ruby/object:Gem::Requirement
+ requirements:
+ - - ">="
+ - !ruby/object:Gem::Version
+ version: '0'
+ - !ruby/object:Gem::Dependency
+ name: smart_proxy_remote_execution_ssh
+ requirement: !ruby/object:Gem::Requirement
+ requirements:
+ - - ">="
+ - !ruby/object:Gem::Version
+ version: 0.4.0
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: 4.2.0
+ version: 0.4.0
  description: " Ssh remote execution provider code sharable between Foreman and Foreman-Proxy\n"
  email:
  - inecas@redhat.com
@@ -76,14 +90,6 @@ files:
  - LICENSE
  - lib/foreman_remote_execution_core.rb
  - lib/foreman_remote_execution_core/actions.rb
- - lib/foreman_remote_execution_core/async_scripts/control.sh
- - lib/foreman_remote_execution_core/async_scripts/retrieve.sh
- - lib/foreman_remote_execution_core/dispatcher.rb
- - lib/foreman_remote_execution_core/fake_script_runner.rb
- - lib/foreman_remote_execution_core/log_filter.rb
- - lib/foreman_remote_execution_core/polling_script_runner.rb
- - lib/foreman_remote_execution_core/script_runner.rb
- - lib/foreman_remote_execution_core/utils.rb
  - lib/foreman_remote_execution_core/version.rb
  homepage: https://github.com/theforeman/foreman_remote_execution
  licenses:
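
The metadata hunks show two dependency changes: an existing constraint is relaxed from ">= 4.2.0" to ">= 0", and a new runtime dependency on smart_proxy_remote_execution_ssh ">= 0.4.0" is introduced. An illustrative gemspec fragment that would produce such metadata follows; the file layout and summary wording are assumptions, not the gem's actual gemspec:

    # Hypothetical gemspec fragment, shown only to illustrate the dependency change.
    Gem::Specification.new do |spec|
      spec.name    = 'foreman_remote_execution_core'
      spec.version = '1.5.0'
      spec.summary = 'Ssh remote execution provider code sharable between Foreman and Foreman-Proxy'
      spec.authors = ['Ivan Nečas']

      spec.add_runtime_dependency 'smart_proxy_remote_execution_ssh', '>= 0.4.0'
    end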
lib/foreman_remote_execution_core/async_scripts/control.sh DELETED
@@ -1,110 +0,0 @@
- #!/bin/sh
- #
- # Control script for the remote execution jobs.
- #
- # The initial script calls `$CONTROL_SCRIPT init-script-finish` once the original script exits.
- # In automatic mode, the exit code is sent back to the proxy on `init-script-finish`.
- #
- # What the script provides is also a manual mode, where the author of the rex script can take
- # full control of the job lifecycle. This allows keeping the marked as running even when
- # the initial script finishes.
- #
- # The manual mode is turned on by calling `$CONTROL_SCRIPT manual-control`. After calling this,
- # one can call `echo message | $CONTROL_SCRIPT update` to send output to the remote execution jobs
- # and `$CONTROL_SCRIPT finish 0` once finished (with 0 as exit code) to send output to the remote execution jobs
- # and `$CONTROL_SCRIPT finish 0` once finished (with 0 as exit code)
- BASE_DIR="$(dirname "$(readlink -f "$0")")"
-
- if ! command -v curl >/dev/null; then
- echo 'curl is required' >&2
- exit 1
- fi
-
- # send the callback data to proxy
- update() {
- "$BASE_DIR/retrieve.sh" push_update
- }
-
- # wait for named pipe $1 to retrieve data. If $2 is provided, it serves as timeout
- # in seconds on how long to wait when reading.
- wait_for_pipe() {
- pipe_path=$1
- if [ -n "$2" ]; then
- timeout="-t $2"
- fi
- if read $timeout <>"$pipe_path"; then
- rm "$pipe_path"
- return 0
- else
- return 1
- fi
- }
-
- # function run in background, when receiving update data via STDIN.
- periodic_update() {
- interval=1
- # reading some data from periodic_update_control signals we're done
- while ! wait_for_pipe "$BASE_DIR/periodic_update_control" "$interval"; do
- update
- done
- # one more update before we finish
- update
- # signal the main process that we are finished
- echo > "$BASE_DIR/periodic_update_finished"
- }
-
- # signal the periodic_update process that the main process is finishing
- periodic_update_finish() {
- if [ -e "$BASE_DIR/periodic_update_control" ]; then
- echo > "$BASE_DIR/periodic_update_control"
- fi
- }
-
- ACTION=${1:-finish}
-
- case "$ACTION" in
- init-script-finish)
- if ! [ -e "$BASE_DIR/manual_mode" ]; then
- # make the exit code of initialization script the exit code of the whole job
- cp init_exit_code exit_code
- update
- fi
- ;;
- finish)
- # take exit code passed via the command line, with fallback
- # to the exit code of the initialization script
- exit_code=${2:-$(cat "$BASE_DIR/init_exit_code")}
- echo $exit_code > "$BASE_DIR/exit_code"
- update
- if [ -e "$BASE_DIR/manual_mode" ]; then
- rm "$BASE_DIR/manual_mode"
- fi
- ;;
- update)
- # read data from input when redirected though a pipe
- if ! [ -t 0 ]; then
- # couple of named pipes to coordinate the main process with the periodic_update
- mkfifo "$BASE_DIR/periodic_update_control"
- mkfifo "$BASE_DIR/periodic_update_finished"
- trap "periodic_update_finish" EXIT
- # run periodic update as separate process to keep sending updates in output to server
- periodic_update &
- # redirect the input into output
- tee -a "$BASE_DIR/output"
- periodic_update_finish
- # ensure the periodic update finished before we return
- wait_for_pipe "$BASE_DIR/periodic_update_finished"
- else
- update
- fi
- ;;
- # mark the script to be in manual mode: this means the script author needs to use `update` and `finish`
- # commands to send output to the remote execution job or mark it as finished.
- manual-mode)
- touch "$BASE_DIR/manual_mode"
- ;;
- *)
- echo "Unknown action $ACTION"
- exit 1
- ;;
- esac
lib/foreman_remote_execution_core/async_scripts/retrieve.sh DELETED
@@ -1,151 +0,0 @@
- #!/bin/sh
-
- if ! pgrep --help 2>/dev/null >/dev/null; then
- echo DONE 1
- echo "pgrep is required" >&2
- exit 1
- fi
-
- BASE_DIR="$(dirname "$(readlink -f "$0")")"
-
- # load the data required for generating the callback
- . "$BASE_DIR/env.sh"
- URL_PREFIX="$CALLBACK_HOST/dynflow/tasks/$TASK_ID"
- AUTH="$TASK_ID:$OTP"
- CURL="curl --silent --show-error --fail --max-time 10"
-
- MY_LOCK_FILE="$BASE_DIR/retrieve_lock.$$"
- MY_PID=$$
- echo $MY_PID >"$MY_LOCK_FILE"
- LOCK_FILE="$BASE_DIR/retrieve_lock"
- TMP_OUTPUT_FILE="$BASE_DIR/tmp_output"
-
- RUN_TIMEOUT=30 # for how long can the script hold the lock
- WAIT_TIMEOUT=60 # for how long the script is trying to acquire the lock
- START_TIME=$(date +%s)
-
- fail() {
- echo RUNNING
- echo "$1"
- exit 1
- }
-
- acquire_lock() {
- # try to acquire lock by creating the file (ln should be atomic an fail in case
- # another process succeeded first). We also check the content of the lock file,
- # in case our process won when competing over the lock while invalidating
- # the lock on timeout.
- ln "$MY_LOCK_FILE" "$LOCK_FILE" 2>/dev/null || [ "$(head -n1 "$LOCK_FILE")" = "$MY_PID" ]
- return $?
- }
-
- # acquiring the lock before proceeding, to ensure only one instance of the script is running
- while ! acquire_lock; do
- # we failed to create retrieve_lock - assuming there is already another retrieve script running
- current_pid=$(head -n1 "$LOCK_FILE")
- if [ -z "$current_pid" ]; then
- continue
- fi
- # check whether the lock is not too old (compared to $RUN_TIMEOUT) and try to kill
- # if it is, so that we don't have a stalled processes here
- lock_lines_count=$(wc -l < "$LOCK_FILE")
- current_lock_time=$(stat --format "%Y" "$LOCK_FILE")
- current_time=$(date +%s)
-
- if [ "$(( current_time - START_TIME ))" -gt "$WAIT_TIMEOUT" ]; then
- # We were waiting for the lock for too long - just give up
- fail "Wait time exceeded $WAIT_TIMEOUT"
- elif [ "$(( current_time - current_lock_time ))" -gt "$RUN_TIMEOUT" ]; then
- # The previous lock it hold for too long - re-acquiring procedure
- if [ "$lock_lines_count" -gt 1 ]; then
- # there were multiple processes waiting for lock without resolution
- # longer than the $RUN_TIMEOUT - we reset the lock file and let processes
- # to compete
- echo "RETRY" > "$LOCK_FILE"
- fi
- if [ "$current_pid" != "RETRY" ]; then
- # try to kill the currently stalled process
- kill -9 "$current_pid" 2>/dev/null
- fi
- # try to add our process as one candidate
- echo $MY_PID >> "$LOCK_FILE"
- if [ "$( head -n2 "$LOCK_FILE" | tail -n1 )" = "$MY_PID" ]; then
- # our process won the competition for the new lock: it is the first pid
- # after the original one in the lock file - take ownership of the lock
- # next iteration only this process will get through
- echo $MY_PID >"$LOCK_FILE"
- fi
- else
- # still waiting for the original owner to finish
- sleep 1
- fi
- done
-
- release_lock() {
- rm "$MY_LOCK_FILE"
- rm "$LOCK_FILE"
- }
- # ensure the release the lock at exit
- trap "release_lock" EXIT
-
- # make sure we clear previous tmp output file
- if [ -e "$TMP_OUTPUT_FILE" ]; then
- rm "$TMP_OUTPUT_FILE"
- fi
-
- pid=$(cat "$BASE_DIR/pid")
- [ -f "$BASE_DIR/position" ] || echo 1 > "$BASE_DIR/position"
- position=$(cat "$BASE_DIR/position")
-
- prepare_output() {
- if [ -e "$BASE_DIR/manual_mode" ] || ([ -n "$pid" ] && pgrep -P "$pid" >/dev/null 2>&1); then
- echo RUNNING
- else
- echo "DONE $(cat "$BASE_DIR/exit_code" 2>/dev/null)"
- fi
- [ -f "$BASE_DIR/output" ] || exit 0
- tail --bytes "+${position}" "$BASE_DIR/output" > "$TMP_OUTPUT_FILE"
- cat "$TMP_OUTPUT_FILE"
- }
-
- # prepare the callback payload
- payload() {
- if [ -n "$1" ]; then
- exit_code="$1"
- else
- exit_code=null
- fi
-
- if [ -e "$BASE_DIR/manual_mode" ]; then
- manual_mode=true
- output=$(prepare_output | base64 -w0)
- else
- manual_mode=false
- fi
-
- echo "{ \"exit_code\": $exit_code,"\
- " \"step_id\": \"$STEP_ID\","\
- " \"manual_mode\": $manual_mode,"\
- " \"output\": \"$output\" }"
- }
-
- if [ "$1" = "push_update" ]; then
- if [ -e "$BASE_DIR/exit_code" ]; then
- exit_code="$(cat "$BASE_DIR/exit_code")"
- action="done"
- else
- exit_code=""
- action="update"
- fi
- $CURL -X POST -d "$(payload $exit_code)" -u "$AUTH" "$URL_PREFIX"/$action 2>>"$BASE_DIR/curl_stderr"
- success=$?
- else
- prepare_output
- success=$?
- fi
-
- if [ "$success" = 0 ] && [ -e "$TMP_OUTPUT_FILE" ]; then
- # in case the retrieval was successful, move the position of the cursor to be read next time
- bytes=$(wc --bytes < "$TMP_OUTPUT_FILE")
- expr "${position}" + "${bytes}" > "$BASE_DIR/position"
- fi
lib/foreman_remote_execution_core/dispatcher.rb DELETED
@@ -1,12 +0,0 @@
- require 'foreman_tasks_core/runner/dispatcher'
-
- module ForemanRemoteExecutionCore
- class Dispatcher < ::ForemanTasksCore::Runner::Dispatcher
-
- def refresh_interval
- @refresh_interval ||= ForemanRemoteExecutionCore.settings[:runner_refresh_interval] ||
- ForemanRemoteExecutionCore.runner_class::DEFAULT_REFRESH_INTERVAL
- end
-
- end
- end
lib/foreman_remote_execution_core/fake_script_runner.rb DELETED
@@ -1,87 +0,0 @@
- module ForemanRemoteExecutionCore
- class FakeScriptRunner < ForemanTasksCore::Runner::Base
- DEFAULT_REFRESH_INTERVAL = 1
-
- @data = []
-
- class << self
- attr_accessor :data
-
- def load_data(path = nil)
- if path.nil?
- @data = <<-END.gsub(/^\s+\| ?/, '').lines
- | ====== Simulated Remote Execution ======
- |
- | This is an output of a simulated remote
- | execution run. It should run for about
- | 5 seconds and finish successfully.
- END
- else
- File.open(File.expand_path(path), 'r') do |f|
- @data = f.readlines.map(&:chomp)
- end
- end
- @data.freeze
- end
-
- def build(options, suspended_action:)
- new(options, suspended_action: suspended_action)
- end
- end
-
- def initialize(*args)
- super
- # Load the fake output the first time its needed
- self.class.load_data(ENV['REX_SIMULATE_PATH']) unless self.class.data.frozen?
- @position = 0
- end
-
- def start
- refresh
- end
-
- # Do one step
- def refresh
- if done?
- finish
- else
- step
- end
- end
-
- def kill
- finish
- end
-
- private
-
- def finish
- publish_exit_status exit_code
- end
-
- def step
- publish_data(next_chunk, 'stdout')
- end
-
- def done?
- @position == self.class.data.count
- end
-
- def next_chunk
- output = self.class.data[@position]
- @position += 1
- output
- end
-
- # Decide if the execution should fail or not
- def exit_code
- fail_chance = ENV.fetch('REX_SIMULATE_FAIL_CHANCE', 0).to_i
- fail_exitcode = ENV.fetch('REX_SIMULATE_EXIT', 0).to_i
- if fail_exitcode == 0 || fail_chance < (Random.rand * 100).round
- 0
- else
- fail_exitcode
- end
- end
- end
- end
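
The removed FakeScriptRunner, together with ForemanRemoteExecutionCore.simulate? and runner_class shown earlier in this diff, let 1.4.8 run simulated jobs driven entirely by environment variables. A sketch of how those variables selected and shaped the fake runner; the variable names come from the removed code, while the values and file path are invented:

    # Illustrative 1.4.8 usage; in 1.5.0 this logic lives in smart_proxy_remote_execution_ssh.
    ENV['REX_SIMULATE']             = 'true'          # simulate? accepts 'yes', 'true' or '1'
    ENV['REX_SIMULATE_PATH']        = '/tmp/fake.txt' # optional canned output (hypothetical path)
    ENV['REX_SIMULATE_EXIT']        = '1'             # exit code used when a simulated run fails
    ENV['REX_SIMULATE_FAIL_CHANCE'] = '50'            # fail roughly half of the simulated runs

    require 'foreman_remote_execution_core'
    # With 1.4.8 loaded under Dynflow, the runner selection then resolves to the fake runner:
    # ForemanRemoteExecutionCore.runner_class  # => ForemanRemoteExecutionCore::FakeScriptRunner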
lib/foreman_remote_execution_core/log_filter.rb DELETED
@@ -1,14 +0,0 @@
- module ForemanRemoteExecutionCore
- class LogFilter < ::Logger
- def initialize(base_logger)
- @base_logger = base_logger
- end
-
- def add(severity, *args, &block)
- severity ||= ::Logger::UNKNOWN
- return true if @base_logger.nil? || severity < @level
-
- @base_logger.add(severity, *args, &block)
- end
- end
- end
lib/foreman_remote_execution_core/polling_script_runner.rb DELETED
@@ -1,136 +0,0 @@
- require 'base64'
-
- module ForemanRemoteExecutionCore
- class PollingScriptRunner < ScriptRunner
-
- DEFAULT_REFRESH_INTERVAL = 60
-
- def self.load_script(name)
- script_dir = File.expand_path('../async_scripts', __FILE__)
- File.read(File.join(script_dir, name))
- end
-
- # The script that controls the flow of the job, able to initiate update or
- # finish on the task, or take over the control over script lifecycle
- CONTROL_SCRIPT = load_script('control.sh')
-
- # The script always outputs at least one line
- # First line of the output either has to begin with
- # "RUNNING" or "DONE $EXITCODE"
- # The following lines are treated as regular output
- RETRIEVE_SCRIPT = load_script('retrieve.sh')
-
- def initialize(options, user_method, suspended_action: nil)
- super(options, user_method, suspended_action: suspended_action)
- @callback_host = options[:callback_host]
- @task_id = options[:uuid]
- @step_id = options[:step_id]
- @otp = ForemanTasksCore::OtpManager.generate_otp(@task_id)
- end
-
- def prepare_start
- super
- @base_dir = File.dirname @remote_script
- upload_control_scripts
- end
-
- def initialization_script
- close_stdin = '</dev/null'
- close_fds = close_stdin + ' >/dev/null 2>/dev/null'
- main_script = "(#{@remote_script} #{close_stdin} 2>&1; echo $?>#{@base_dir}/init_exit_code) >#{@base_dir}/output"
- control_script_finish = "#{@control_script_path} init-script-finish"
- <<-SCRIPT.gsub(/^ +\| /, '')
- | export CONTROL_SCRIPT="#{@control_script_path}"
- | sh -c '#{main_script}; #{control_script_finish}' #{close_fds} &
- | echo $! > '#{@base_dir}/pid'
- SCRIPT
- end
-
- def trigger(*args)
- run_sync(*args)
- end
-
- def refresh
- err = output = nil
- begin
- _, output, err = run_sync("#{@user_method.cli_command_prefix} #{@retrieval_script}")
- rescue => e
- @logger.info("Error while connecting to the remote host on refresh: #{e.message}")
- end
- return if output.nil? || output.empty?
-
- lines = output.lines
- result = lines.shift.match(/^DONE (\d+)?/)
- publish_data(lines.join, 'stdout') unless lines.empty?
- publish_data(err, 'stderr') unless err.empty?
- if result
- exitcode = result[1] || 0
- publish_exit_status(exitcode.to_i)
- cleanup
- end
- ensure
- destroy_session
- end
-
- def external_event(event)
- data = event.data
- if data['manual_mode']
- load_event_updates(data)
- else
- # getting the update from automatic mode - reaching to the host to get the latest update
- return run_refresh
- end
- ensure
- destroy_session
- end
-
- def close
- super
- ForemanTasksCore::OtpManager.drop_otp(@task_id, @otp) if @otp
- end
-
- def upload_control_scripts
- return if @control_scripts_uploaded
-
- cp_script_to_remote(env_script, 'env.sh')
- @control_script_path = cp_script_to_remote(CONTROL_SCRIPT, 'control.sh')
- @retrieval_script = cp_script_to_remote(RETRIEVE_SCRIPT, 'retrieve.sh')
- @control_scripts_uploaded = true
- end
-
- # Script setting the dynamic values to env variables: it's sourced from other control scripts
- def env_script
- <<-SCRIPT.gsub(/^ +\| /, '')
- | CALLBACK_HOST="#{@callback_host}"
- | TASK_ID="#{@task_id}"
- | STEP_ID="#{@step_id}"
- | OTP="#{@otp}"
- SCRIPT
- end
-
- private
-
- # Generates updates based on the callback data from the manual mode
- def load_event_updates(event_data)
- continuous_output = ForemanTasksCore::ContinuousOutput.new
- if event_data.key?('output')
- lines = Base64.decode64(event_data['output']).sub(/\A(RUNNING|DONE).*\n/, '')
- continuous_output.add_output(lines, 'stdout')
- end
- cleanup if event_data['exit_code']
- new_update(continuous_output, event_data['exit_code'])
- end
-
- def cleanup
- run_sync("rm -rf \"#{remote_command_dir}\"") if @cleanup_working_dirs
- end
-
- def destroy_session
- if @session
- @logger.debug("Closing session with #{@ssh_user}@#{@host}")
- @session.close
- @session = nil
- end
- end
- end
- end
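
PollingScriptRunner#refresh above parses the first line printed by retrieve.sh ("RUNNING" or "DONE $EXITCODE") and treats everything after it as job output. A self-contained sketch of that status-line protocol using the same regular expression; parse_retrieve_output is a made-up helper name, not part of the gem:

    # Standalone illustration of the retrieve.sh status-line protocol.
    def parse_retrieve_output(output)
      lines  = output.lines
      match  = lines.shift.match(/^DONE (\d+)?/)       # same pattern as the removed #refresh
      status = match ? (match[1] || 0).to_i : :running # DONE without a number counts as success
      [status, lines.join]
    end

    p parse_retrieve_output("RUNNING\nstill working...\n") # => [:running, "still working...\n"]
    p parse_retrieve_output("DONE 0\nall done\n")          # => [0, "all done\n"]
    p parse_retrieve_output("DONE 6\n")                    # => [6, ""]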
lib/foreman_remote_execution_core/script_runner.rb DELETED
@@ -1,475 +0,0 @@
- require 'net/ssh'
- require 'fileutils'
-
- # rubocop:disable Lint/SuppressedException
- begin
- require 'net/ssh/krb'
- rescue LoadError; end
- # rubocop:enable Lint/SuppressedException:
-
- module ForemanRemoteExecutionCore
- class EffectiveUserMethod
- attr_reader :effective_user, :ssh_user, :effective_user_password, :password_sent
-
- def initialize(effective_user, ssh_user, effective_user_password)
- @effective_user = effective_user
- @ssh_user = ssh_user
- @effective_user_password = effective_user_password.to_s
- @password_sent = false
- end
-
- def on_data(received_data, ssh_channel)
- if received_data.match(login_prompt)
- ssh_channel.send_data(effective_user_password + "\n")
- @password_sent = true
- end
- end
-
- def filter_password?(received_data)
- !@effective_user_password.empty? && @password_sent && received_data.match(Regexp.escape(@effective_user_password))
- end
-
- def sent_all_data?
- effective_user_password.empty? || password_sent
- end
-
- def reset
- @password_sent = false
- end
-
- def cli_command_prefix
- end
-
- def login_prompt
- end
- end
-
- class SudoUserMethod < EffectiveUserMethod
- LOGIN_PROMPT = 'rex login: '.freeze
-
- def login_prompt
- LOGIN_PROMPT
- end
-
- def cli_command_prefix
- "sudo -p '#{LOGIN_PROMPT}' -u #{effective_user} "
- end
- end
-
- class DzdoUserMethod < EffectiveUserMethod
- LOGIN_PROMPT = /password/i.freeze
-
- def login_prompt
- LOGIN_PROMPT
- end
-
- def cli_command_prefix
- "dzdo -u #{effective_user} "
- end
- end
-
- class SuUserMethod < EffectiveUserMethod
- LOGIN_PROMPT = /Password: /i.freeze
-
- def login_prompt
- LOGIN_PROMPT
- end
-
- def cli_command_prefix
- "su - #{effective_user} -c "
- end
- end
-
- class NoopUserMethod
- def on_data(_, _)
- end
-
- def filter_password?(received_data)
- false
- end
-
- def sent_all_data?
- true
- end
-
- def cli_command_prefix
- end
-
- def reset
- end
- end
-
- class ScriptRunner < ForemanTasksCore::Runner::Base
- attr_reader :execution_timeout_interval
-
- EXPECTED_POWER_ACTION_MESSAGES = ['restart host', 'shutdown host'].freeze
- DEFAULT_REFRESH_INTERVAL = 1
- MAX_PROCESS_RETRIES = 4
- VERIFY_HOST_KEY = Gem::Version.create(Net::SSH::Version::STRING) < Gem::Version.create('5.0.0') ||
- :accept_new_or_local_tunnel
-
- def initialize(options, user_method, suspended_action: nil)
- super suspended_action: suspended_action
- @host = options.fetch(:hostname)
- @script = options.fetch(:script)
- @ssh_user = options.fetch(:ssh_user, 'root')
- @ssh_port = options.fetch(:ssh_port, 22)
- @ssh_password = options.fetch(:secrets, {}).fetch(:ssh_password, nil)
- @key_passphrase = options.fetch(:secrets, {}).fetch(:key_passphrase, nil)
- @host_public_key = options.fetch(:host_public_key, nil)
- @verify_host = options.fetch(:verify_host, nil)
- @execution_timeout_interval = options.fetch(:execution_timeout_interval, nil)
-
- @client_private_key_file = settings.fetch(:ssh_identity_key_file)
- @local_working_dir = options.fetch(:local_working_dir, settings.fetch(:local_working_dir))
- @remote_working_dir = options.fetch(:remote_working_dir, settings.fetch(:remote_working_dir))
- @cleanup_working_dirs = options.fetch(:cleanup_working_dirs, settings.fetch(:cleanup_working_dirs))
- @first_execution = options.fetch(:first_execution, false)
- @user_method = user_method
- end
-
- def self.build(options, suspended_action:)
- effective_user = options.fetch(:effective_user, nil)
- ssh_user = options.fetch(:ssh_user, 'root')
- effective_user_method = options.fetch(:effective_user_method, 'sudo')
-
- user_method = if effective_user.nil? || effective_user == ssh_user
- NoopUserMethod.new
- elsif effective_user_method == 'sudo'
- SudoUserMethod.new(effective_user, ssh_user,
- options.fetch(:secrets, {}).fetch(:effective_user_password, nil))
- elsif effective_user_method == 'dzdo'
- DzdoUserMethod.new(effective_user, ssh_user,
- options.fetch(:secrets, {}).fetch(:effective_user_password, nil))
- elsif effective_user_method == 'su'
- SuUserMethod.new(effective_user, ssh_user,
- options.fetch(:secrets, {}).fetch(:effective_user_password, nil))
- else
- raise "effective_user_method '#{effective_user_method}' not supported"
- end
-
- new(options, user_method, suspended_action: suspended_action)
- end
-
- def start
- Utils.prune_known_hosts!(@host, @ssh_port, logger) if @first_execution
- prepare_start
- script = initialization_script
- logger.debug("executing script:\n#{indent_multiline(script)}")
- trigger(script)
- rescue StandardError, NotImplementedError => e
- logger.error("error while initalizing command #{e.class} #{e.message}:\n #{e.backtrace.join("\n")}")
- publish_exception('Error initializing command', e)
- end
-
- def trigger(*args)
- run_async(*args)
- end
-
- def prepare_start
- @remote_script = cp_script_to_remote
- @output_path = File.join(File.dirname(@remote_script), 'output')
- @exit_code_path = File.join(File.dirname(@remote_script), 'exit_code')
- end
-
- # the script that initiates the execution
- def initialization_script
- su_method = @user_method.instance_of?(ForemanRemoteExecutionCore::SuUserMethod)
- # pipe the output to tee while capturing the exit code in a file
- <<-SCRIPT.gsub(/^\s+\| /, '')
- | sh -c "(#{@user_method.cli_command_prefix}#{su_method ? "'#{@remote_script} < /dev/null '" : "#{@remote_script} < /dev/null"}; echo \\$?>#{@exit_code_path}) | /usr/bin/tee #{@output_path}
- | exit \\$(cat #{@exit_code_path})"
- SCRIPT
- end
-
- def refresh
- return if @session.nil?
-
- with_retries do
- with_disconnect_handling do
- @session.process(0)
- end
- end
- ensure
- check_expecting_disconnect
- end
-
- def kill
- if @session
- run_sync("pkill -f #{remote_command_file('script')}")
- else
- logger.debug('connection closed')
- end
- rescue => e
- publish_exception('Unexpected error', e, false)
- end
-
- def timeout
- @logger.debug('job timed out')
- super
- end
-
- def timeout_interval
- execution_timeout_interval
- end
-
- def with_retries
- tries = 0
- begin
- yield
- rescue => e
- logger.error("Unexpected error: #{e.class} #{e.message}\n #{e.backtrace.join("\n")}")
- tries += 1
- if tries <= MAX_PROCESS_RETRIES
- logger.error('Retrying')
- retry
- else
- publish_exception('Unexpected error', e)
- end
- end
- end
-
- def with_disconnect_handling
- yield
- rescue IOError, Net::SSH::Disconnect => e
- @session.shutdown!
- check_expecting_disconnect
- if @expecting_disconnect
- publish_exit_status(0)
- else
- publish_exception('Unexpected disconnect', e)
- end
- end
-
- def close
- run_sync("rm -rf \"#{remote_command_dir}\"") if should_cleanup?
- rescue => e
- publish_exception('Error when removing remote working dir', e, false)
- ensure
- @session.close if @session && !@session.closed?
- FileUtils.rm_rf(local_command_dir) if Dir.exist?(local_command_dir) && @cleanup_working_dirs
- end
-
- def publish_data(data, type)
- super(data.force_encoding('UTF-8'), type)
- end
-
- private
-
- def indent_multiline(string)
- string.lines.map { |line| " | #{line}" }.join
- end
-
- def should_cleanup?
- @session && !@session.closed? && @cleanup_working_dirs
- end
-
- def session
- @session ||= begin
- @logger.debug("opening session to #{@ssh_user}@#{@host}")
- Net::SSH.start(@host, @ssh_user, ssh_options)
- end
- end
-
- def ssh_options
- ssh_options = {}
- ssh_options[:port] = @ssh_port if @ssh_port
- ssh_options[:keys] = [@client_private_key_file] if @client_private_key_file
- ssh_options[:password] = @ssh_password if @ssh_password
- ssh_options[:passphrase] = @key_passphrase if @key_passphrase
- ssh_options[:keys_only] = true
- # if the host public key is contained in the known_hosts_file,
- # verify it, otherwise, if missing, import it and continue
- ssh_options[:verify_host_key] = VERIFY_HOST_KEY
- ssh_options[:auth_methods] = available_authentication_methods
- ssh_options[:user_known_hosts_file] = prepare_known_hosts if @host_public_key
- ssh_options[:number_of_password_prompts] = 1
- ssh_options[:verbose] = settings[:ssh_log_level]
- ssh_options[:logger] = ForemanRemoteExecutionCore::LogFilter.new(SmartProxyDynflowCore::Log.instance)
- return ssh_options
- end
-
- def settings
- ForemanRemoteExecutionCore.settings
- end
-
- # Initiates run of the remote command and yields the data when
- # available. The yielding doesn't happen automatically, but as
- # part of calling the `refresh` method.
- def run_async(command)
- raise 'Async command already in progress' if @started
-
- @started = false
- @user_method.reset
-
- session.open_channel do |channel|
- channel.request_pty
- channel.on_data do |ch, data|
- publish_data(data, 'stdout') unless @user_method.filter_password?(data)
- @user_method.on_data(data, ch)
- end
- channel.on_extended_data { |ch, type, data| publish_data(data, 'stderr') }
- # standard exit of the command
- channel.on_request('exit-status') { |ch, data| publish_exit_status(data.read_long) }
- # on signal: sending the signal value (such as 'TERM')
- channel.on_request('exit-signal') do |ch, data|
- publish_exit_status(data.read_string)
- ch.close
- # wait for the channel to finish so that we know at the end
- # that the session is inactive
- ch.wait
- end
- channel.exec(command) do |_, success|
- @started = true
- raise('Error initializing command') unless success
- end
- end
- session.process(0) { !run_started? }
- return true
- end
-
- def run_started?
- @started && @user_method.sent_all_data?
- end
-
- def run_sync(command, stdin = nil)
- stdout = ''
- stderr = ''
- exit_status = nil
- started = false
-
- channel = session.open_channel do |ch|
- ch.on_data do |c, data|
- stdout.concat(data)
- end
- ch.on_extended_data { |_, _, data| stderr.concat(data) }
- ch.on_request('exit-status') { |_, data| exit_status = data.read_long }
- # Send data to stdin if we have some
- ch.send_data(stdin) unless stdin.nil?
- # on signal: sending the signal value (such as 'TERM')
- ch.on_request('exit-signal') do |_, data|
- exit_status = data.read_string
- ch.close
- ch.wait
- end
- ch.exec command do |_, success|
- raise 'could not execute command' unless success
-
- started = true
- end
- end
- session.process(0) { !started }
- # Closing the channel without sending any data gives us SIGPIPE
- channel.close unless stdin.nil?
- channel.wait
- return exit_status, stdout, stderr
- end
-
- def prepare_known_hosts
- path = local_command_file('known_hosts')
- if @host_public_key
- write_command_file_locally('known_hosts', "#{@host} #{@host_public_key}")
- end
- return path
- end
-
- def local_command_dir
- File.join(@local_working_dir, 'foreman-proxy', "foreman-ssh-cmd-#{@id}")
- end
-
- def local_command_file(filename)
- File.join(local_command_dir, filename)
- end
-
- def remote_command_dir
- File.join(@remote_working_dir, "foreman-ssh-cmd-#{id}")
- end
-
- def remote_command_file(filename)
- File.join(remote_command_dir, filename)
- end
-
- def ensure_local_directory(path)
- if File.exist?(path)
- raise "#{path} expected to be a directory" unless File.directory?(path)
- else
- FileUtils.mkdir_p(path)
- end
- return path
- end
-
- def cp_script_to_remote(script = @script, name = 'script')
- path = remote_command_file(name)
- @logger.debug("copying script to #{path}:\n#{indent_multiline(script)}")
- upload_data(sanitize_script(script), path, 555)
- end
-
- def upload_data(data, path, permissions = 555)
- ensure_remote_directory File.dirname(path)
- # We use tee here to pipe stdin coming from ssh to a file at $path, while silencing its output
- # This is used to write to $path with elevated permissions, solutions using cat and output redirection
- # would not work, because the redirection would happen in the non-elevated shell.
- command = "tee '#{path}' >/dev/null && chmod '#{permissions}' '#{path}'"
-
- @logger.debug("Sending data to #{path} on remote host:\n#{data}")
- status, _out, err = run_sync(command, data)
-
- @logger.warn("Output on stderr while uploading #{path}:\n#{err}") unless err.empty?
- if status != 0
- raise "Unable to upload file to #{path} on remote system: exit code: #{status}"
- end
-
- path
- end
-
- def upload_file(local_path, remote_path)
- mode = File.stat(local_path).mode.to_s(8)[-3..-1]
- @logger.debug("Uploading local file: #{local_path} as #{remote_path} with #{mode} permissions")
- upload_data(File.read(local_path), remote_path, mode)
- end
-
- def ensure_remote_directory(path)
- exit_code, _output, err = run_sync("mkdir -p #{path}")
- if exit_code != 0
- raise "Unable to create directory on remote system #{path}: exit code: #{exit_code}\n #{err}"
- end
- end
-
- def sanitize_script(script)
- script.tr("\r", '')
- end
-
- def write_command_file_locally(filename, content)
- path = local_command_file(filename)
- ensure_local_directory(File.dirname(path))
- File.write(path, content)
- return path
- end
-
- # when a remote server disconnects, it's hard to tell if it was on purpose (when calling reboot)
- # or it's an error. When it's expected, we expect the script to produce 'restart host' as
- # its last command output
- def check_expecting_disconnect
- last_output = @continuous_output.raw_outputs.find { |d| d['output_type'] == 'stdout' }
- return unless last_output
-
- if EXPECTED_POWER_ACTION_MESSAGES.any? { |message| last_output['output'] =~ /^#{message}/ }
- @expecting_disconnect = true
- end
- end
-
- def available_authentication_methods
- methods = %w(publickey) # Always use pubkey auth as fallback
- if settings[:kerberos_auth]
- if defined? Net::SSH::Kerberos
- methods << 'gssapi-with-mic'
- else
- @logger.warn('Kerberos authentication requested but not available')
- end
- end
- methods.unshift('password') if @ssh_password
-
- methods
- end
- end
- end
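
ScriptRunner.build near the top of the removed file picks an effective-user wrapper (NoopUserMethod, SudoUserMethod, DzdoUserMethod or SuUserMethod) from the job options; the wrapper only contributes a CLI prefix and a password-prompt matcher. A hedged usage sketch of that 1.4.8 API follows; the option keys are taken from the code above, while the host, users, script and password are invented:

    # Sketch against the removed 1.4.8 ScriptRunner; requires the 1.4.8 gem and its
    # ForemanTasksCore runner infrastructure to actually run.
    options = {
      hostname: 'host.example.com',
      script: "#!/bin/sh\necho hello\n",
      ssh_user: 'foreman-proxy',
      effective_user: 'root',
      effective_user_method: 'sudo',   # 'sudo', 'dzdo' or 'su'; anything else raises
      secrets: { effective_user_password: 'secret' },
    }

    runner = ForemanRemoteExecutionCore::ScriptRunner.build(options, suspended_action: nil)
    # The sudo wrapper prefixes the remote command with "sudo -p 'rex login: ' -u root "
    # and answers that prompt with the effective_user_password from the secrets hash.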
lib/foreman_remote_execution_core/utils.rb DELETED
@@ -1,24 +0,0 @@
- require 'open3'
-
- module ForemanRemoteExecutionCore
- module Utils
- class << self
- def prune_known_hosts!(hostname, port, logger = Logger.new($stdout))
- return if Net::SSH::KnownHosts.search_for(hostname).empty?
-
- target = if port == 22
- hostname
- else
- "[#{hostname}]:#{port}"
- end
-
- Open3.popen3('ssh-keygen', '-R', target) do |_stdin, stdout, _stderr, wait_thr|
- wait_thr.join
- stdout.read
- end
- rescue Errno::ENOENT => e
- logger.warn("Could not remove #{hostname} from know_hosts: #{e}")
- end
- end
- end
- end