smart_proxy_remote_execution_ssh 0.3.1 → 0.4.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/lib/smart_proxy_remote_execution_ssh.rb +27 -0
- data/lib/smart_proxy_remote_execution_ssh/actions/run_script.rb +20 -0
- data/lib/smart_proxy_remote_execution_ssh/async_scripts/control.sh +110 -0
- data/lib/smart_proxy_remote_execution_ssh/async_scripts/retrieve.sh +151 -0
- data/lib/smart_proxy_remote_execution_ssh/dispatcher.rb +10 -0
- data/lib/smart_proxy_remote_execution_ssh/log_filter.rb +14 -0
- data/lib/smart_proxy_remote_execution_ssh/plugin.rb +26 -10
- data/lib/smart_proxy_remote_execution_ssh/runners.rb +7 -0
- data/lib/smart_proxy_remote_execution_ssh/runners/fake_script_runner.rb +87 -0
- data/lib/smart_proxy_remote_execution_ssh/runners/polling_script_runner.rb +140 -0
- data/lib/smart_proxy_remote_execution_ssh/runners/script_runner.rb +469 -0
- data/lib/smart_proxy_remote_execution_ssh/version.rb +1 -1
- data/settings.d/remote_execution_ssh.yml.example +3 -0
- metadata +25 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 750910e916f0d4ad411cf868636075e597573dd8cca720e414156bcee55331dc
|
4
|
+
data.tar.gz: 4003e71f358abc47847fb9a2e4cf3c211189bc915b6b4196dd02d6d21633d2b8
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: efa2a87ce6a6125f7701979305a0c5fcc1fb3ad2703d5aef140505943789b45648311e46f892791a919a88d757f019485ff45209efc22423cc6543a298224a03
|
7
|
+
data.tar.gz: 4411e680ca841903d47b295090c4a194f92dfe91cc58796272d42b9c370ad6a3e6a8bc34973f0d7a85d93fe604788993428d944277b05426194893ecff0a9d51
|
@@ -1,3 +1,4 @@
|
|
1
|
+
require 'foreman_tasks_core'
|
1
2
|
require 'smart_proxy_remote_execution_ssh/version'
|
2
3
|
require 'smart_proxy_dynflow'
|
3
4
|
require 'smart_proxy_remote_execution_ssh/webrick_ext'
|
@@ -19,6 +20,8 @@ module Proxy::RemoteExecution
|
|
19
20
|
unless File.exist?(public_key_file)
|
20
21
|
raise "Ssh public key file #{public_key_file} doesn't exist"
|
21
22
|
end
|
23
|
+
|
24
|
+
validate_ssh_log_level!
|
22
25
|
end
|
23
26
|
|
24
27
|
def private_key_file
|
@@ -28,6 +31,30 @@ module Proxy::RemoteExecution
|
|
28
31
|
def public_key_file
|
29
32
|
File.expand_path("#{private_key_file}.pub")
|
30
33
|
end
|
34
|
+
|
35
|
+
def validate_ssh_log_level!
|
36
|
+
wanted_level = Plugin.settings.ssh_log_level.to_s
|
37
|
+
levels = Plugin::SSH_LOG_LEVELS
|
38
|
+
unless levels.include? wanted_level
|
39
|
+
raise "Wrong value '#{Plugin.settings.ssh_log_level}' for ssh_log_level, must be one of #{levels.join(', ')}"
|
40
|
+
end
|
41
|
+
|
42
|
+
current = ::Proxy::SETTINGS.log_level.to_s.downcase
|
43
|
+
|
44
|
+
# regular log levels correspond to upcased ssh logger levels
|
45
|
+
ssh, regular = [wanted_level, current].map do |wanted|
|
46
|
+
levels.each_with_index.find { |value, _index| value == wanted }.last
|
47
|
+
end
|
48
|
+
|
49
|
+
if ssh < regular
|
50
|
+
raise 'ssh_log_level cannot be more verbose than regular log level'
|
51
|
+
end
|
52
|
+
|
53
|
+
Plugin.settings.ssh_log_level = Plugin.settings.ssh_log_level.to_sym
|
54
|
+
end
|
31
55
|
end
|
56
|
+
|
57
|
+
require 'smart_proxy_dynflow_core/task_launcher_registry'
|
58
|
+
SmartProxyDynflowCore::TaskLauncherRegistry.register('ssh', ForemanTasksCore::TaskLauncher::Batch)
|
32
59
|
end
|
33
60
|
end
|
@@ -0,0 +1,20 @@
|
|
1
|
+
require 'foreman_tasks_core/shareable_action'
|
2
|
+
|
3
|
+
module Proxy::RemoteExecution::Ssh
|
4
|
+
module Actions
|
5
|
+
class RunScript < ForemanTasksCore::Runner::Action
|
6
|
+
def initiate_runner
|
7
|
+
additional_options = {
|
8
|
+
:step_id => run_step_id,
|
9
|
+
:uuid => execution_plan_id,
|
10
|
+
}
|
11
|
+
Proxy::RemoteExecution::Ssh::Plugin.runner_class.build(input.merge(additional_options),
|
12
|
+
suspended_action: suspended_action)
|
13
|
+
end
|
14
|
+
|
15
|
+
def runner_dispatcher
|
16
|
+
Dispatcher.instance
|
17
|
+
end
|
18
|
+
end
|
19
|
+
end
|
20
|
+
end
|
@@ -0,0 +1,110 @@
|
|
1
|
+
#!/bin/sh
|
2
|
+
#
|
3
|
+
# Control script for the remote execution jobs.
|
4
|
+
#
|
5
|
+
# The initial script calls `$CONTROL_SCRIPT init-script-finish` once the original script exits.
|
6
|
+
# In automatic mode, the exit code is sent back to the proxy on `init-script-finish`.
|
7
|
+
#
|
8
|
+
# What the script provides is also a manual mode, where the author of the rex script can take
|
9
|
+
# full control of the job lifecycle. This allows keeping the marked as running even when
|
10
|
+
# the initial script finishes.
|
11
|
+
#
|
12
|
+
# The manual mode is turned on by calling `$CONTROL_SCRIPT manual-control`. After calling this,
|
13
|
+
# one can call `echo message | $CONTROL_SCRIPT update` to send output to the remote execution jobs
|
14
|
+
# and `$CONTROL_SCRIPT finish 0` once finished (with 0 as exit code) to send output to the remote execution jobs
|
15
|
+
# and `$CONTROL_SCRIPT finish 0` once finished (with 0 as exit code)
|
16
|
+
BASE_DIR="$(dirname "$(readlink -f "$0")")"
|
17
|
+
|
18
|
+
if ! command -v curl >/dev/null; then
|
19
|
+
echo 'curl is required' >&2
|
20
|
+
exit 1
|
21
|
+
fi
|
22
|
+
|
23
|
+
# send the callback data to proxy
|
24
|
+
update() {
|
25
|
+
"$BASE_DIR/retrieve.sh" push_update
|
26
|
+
}
|
27
|
+
|
28
|
+
# wait for named pipe $1 to retrieve data. If $2 is provided, it serves as timeout
|
29
|
+
# in seconds on how long to wait when reading.
|
30
|
+
wait_for_pipe() {
|
31
|
+
pipe_path=$1
|
32
|
+
if [ -n "$2" ]; then
|
33
|
+
timeout="-t $2"
|
34
|
+
fi
|
35
|
+
if read $timeout <>"$pipe_path"; then
|
36
|
+
rm "$pipe_path"
|
37
|
+
return 0
|
38
|
+
else
|
39
|
+
return 1
|
40
|
+
fi
|
41
|
+
}
|
42
|
+
|
43
|
+
# function run in background, when receiving update data via STDIN.
|
44
|
+
periodic_update() {
|
45
|
+
interval=1
|
46
|
+
# reading some data from periodic_update_control signals we're done
|
47
|
+
while ! wait_for_pipe "$BASE_DIR/periodic_update_control" "$interval"; do
|
48
|
+
update
|
49
|
+
done
|
50
|
+
# one more update before we finish
|
51
|
+
update
|
52
|
+
# signal the main process that we are finished
|
53
|
+
echo > "$BASE_DIR/periodic_update_finished"
|
54
|
+
}
|
55
|
+
|
56
|
+
# signal the periodic_update process that the main process is finishing
|
57
|
+
periodic_update_finish() {
|
58
|
+
if [ -e "$BASE_DIR/periodic_update_control" ]; then
|
59
|
+
echo > "$BASE_DIR/periodic_update_control"
|
60
|
+
fi
|
61
|
+
}
|
62
|
+
|
63
|
+
ACTION=${1:-finish}
|
64
|
+
|
65
|
+
case "$ACTION" in
|
66
|
+
init-script-finish)
|
67
|
+
if ! [ -e "$BASE_DIR/manual_mode" ]; then
|
68
|
+
# make the exit code of initialization script the exit code of the whole job
|
69
|
+
cp init_exit_code exit_code
|
70
|
+
update
|
71
|
+
fi
|
72
|
+
;;
|
73
|
+
finish)
|
74
|
+
# take exit code passed via the command line, with fallback
|
75
|
+
# to the exit code of the initialization script
|
76
|
+
exit_code=${2:-$(cat "$BASE_DIR/init_exit_code")}
|
77
|
+
echo $exit_code > "$BASE_DIR/exit_code"
|
78
|
+
update
|
79
|
+
if [ -e "$BASE_DIR/manual_mode" ]; then
|
80
|
+
rm "$BASE_DIR/manual_mode"
|
81
|
+
fi
|
82
|
+
;;
|
83
|
+
update)
|
84
|
+
# read data from input when redirected though a pipe
|
85
|
+
if ! [ -t 0 ]; then
|
86
|
+
# couple of named pipes to coordinate the main process with the periodic_update
|
87
|
+
mkfifo "$BASE_DIR/periodic_update_control"
|
88
|
+
mkfifo "$BASE_DIR/periodic_update_finished"
|
89
|
+
trap "periodic_update_finish" EXIT
|
90
|
+
# run periodic update as separate process to keep sending updates in output to server
|
91
|
+
periodic_update &
|
92
|
+
# redirect the input into output
|
93
|
+
tee -a "$BASE_DIR/output"
|
94
|
+
periodic_update_finish
|
95
|
+
# ensure the periodic update finished before we return
|
96
|
+
wait_for_pipe "$BASE_DIR/periodic_update_finished"
|
97
|
+
else
|
98
|
+
update
|
99
|
+
fi
|
100
|
+
;;
|
101
|
+
# mark the script to be in manual mode: this means the script author needs to use `update` and `finish`
|
102
|
+
# commands to send output to the remote execution job or mark it as finished.
|
103
|
+
manual-mode)
|
104
|
+
touch "$BASE_DIR/manual_mode"
|
105
|
+
;;
|
106
|
+
*)
|
107
|
+
echo "Unknown action $ACTION"
|
108
|
+
exit 1
|
109
|
+
;;
|
110
|
+
esac
|
@@ -0,0 +1,151 @@
|
|
1
|
+
#!/bin/sh
|
2
|
+
|
3
|
+
if ! pgrep --help 2>/dev/null >/dev/null; then
|
4
|
+
echo DONE 1
|
5
|
+
echo "pgrep is required" >&2
|
6
|
+
exit 1
|
7
|
+
fi
|
8
|
+
|
9
|
+
BASE_DIR="$(dirname "$(readlink -f "$0")")"
|
10
|
+
|
11
|
+
# load the data required for generating the callback
|
12
|
+
. "$BASE_DIR/env.sh"
|
13
|
+
URL_PREFIX="$CALLBACK_HOST/dynflow/tasks/$TASK_ID"
|
14
|
+
AUTH="$TASK_ID:$OTP"
|
15
|
+
CURL="curl --silent --show-error --fail --max-time 10"
|
16
|
+
|
17
|
+
MY_LOCK_FILE="$BASE_DIR/retrieve_lock.$$"
|
18
|
+
MY_PID=$$
|
19
|
+
echo $MY_PID >"$MY_LOCK_FILE"
|
20
|
+
LOCK_FILE="$BASE_DIR/retrieve_lock"
|
21
|
+
TMP_OUTPUT_FILE="$BASE_DIR/tmp_output"
|
22
|
+
|
23
|
+
RUN_TIMEOUT=30 # for how long can the script hold the lock
|
24
|
+
WAIT_TIMEOUT=60 # for how long the script is trying to acquire the lock
|
25
|
+
START_TIME=$(date +%s)
|
26
|
+
|
27
|
+
fail() {
|
28
|
+
echo RUNNING
|
29
|
+
echo "$1"
|
30
|
+
exit 1
|
31
|
+
}
|
32
|
+
|
33
|
+
acquire_lock() {
|
34
|
+
# try to acquire lock by creating the file (ln should be atomic an fail in case
|
35
|
+
# another process succeeded first). We also check the content of the lock file,
|
36
|
+
# in case our process won when competing over the lock while invalidating
|
37
|
+
# the lock on timeout.
|
38
|
+
ln "$MY_LOCK_FILE" "$LOCK_FILE" 2>/dev/null || [ "$(head -n1 "$LOCK_FILE")" = "$MY_PID" ]
|
39
|
+
return $?
|
40
|
+
}
|
41
|
+
|
42
|
+
# acquiring the lock before proceeding, to ensure only one instance of the script is running
|
43
|
+
while ! acquire_lock; do
|
44
|
+
# we failed to create retrieve_lock - assuming there is already another retrieve script running
|
45
|
+
current_pid=$(head -n1 "$LOCK_FILE")
|
46
|
+
if [ -z "$current_pid" ]; then
|
47
|
+
continue
|
48
|
+
fi
|
49
|
+
# check whether the lock is not too old (compared to $RUN_TIMEOUT) and try to kill
|
50
|
+
# if it is, so that we don't have a stalled processes here
|
51
|
+
lock_lines_count=$(wc -l < "$LOCK_FILE")
|
52
|
+
current_lock_time=$(stat --format "%Y" "$LOCK_FILE")
|
53
|
+
current_time=$(date +%s)
|
54
|
+
|
55
|
+
if [ "$(( current_time - START_TIME ))" -gt "$WAIT_TIMEOUT" ]; then
|
56
|
+
# We were waiting for the lock for too long - just give up
|
57
|
+
fail "Wait time exceeded $WAIT_TIMEOUT"
|
58
|
+
elif [ "$(( current_time - current_lock_time ))" -gt "$RUN_TIMEOUT" ]; then
|
59
|
+
# The previous lock it hold for too long - re-acquiring procedure
|
60
|
+
if [ "$lock_lines_count" -gt 1 ]; then
|
61
|
+
# there were multiple processes waiting for lock without resolution
|
62
|
+
# longer than the $RUN_TIMEOUT - we reset the lock file and let processes
|
63
|
+
# to compete
|
64
|
+
echo "RETRY" > "$LOCK_FILE"
|
65
|
+
fi
|
66
|
+
if [ "$current_pid" != "RETRY" ]; then
|
67
|
+
# try to kill the currently stalled process
|
68
|
+
kill -9 "$current_pid" 2>/dev/null
|
69
|
+
fi
|
70
|
+
# try to add our process as one candidate
|
71
|
+
echo $MY_PID >> "$LOCK_FILE"
|
72
|
+
if [ "$( head -n2 "$LOCK_FILE" | tail -n1 )" = "$MY_PID" ]; then
|
73
|
+
# our process won the competition for the new lock: it is the first pid
|
74
|
+
# after the original one in the lock file - take ownership of the lock
|
75
|
+
# next iteration only this process will get through
|
76
|
+
echo $MY_PID >"$LOCK_FILE"
|
77
|
+
fi
|
78
|
+
else
|
79
|
+
# still waiting for the original owner to finish
|
80
|
+
sleep 1
|
81
|
+
fi
|
82
|
+
done
|
83
|
+
|
84
|
+
release_lock() {
|
85
|
+
rm "$MY_LOCK_FILE"
|
86
|
+
rm "$LOCK_FILE"
|
87
|
+
}
|
88
|
+
# ensure the release the lock at exit
|
89
|
+
trap "release_lock" EXIT
|
90
|
+
|
91
|
+
# make sure we clear previous tmp output file
|
92
|
+
if [ -e "$TMP_OUTPUT_FILE" ]; then
|
93
|
+
rm "$TMP_OUTPUT_FILE"
|
94
|
+
fi
|
95
|
+
|
96
|
+
pid=$(cat "$BASE_DIR/pid")
|
97
|
+
[ -f "$BASE_DIR/position" ] || echo 1 > "$BASE_DIR/position"
|
98
|
+
position=$(cat "$BASE_DIR/position")
|
99
|
+
|
100
|
+
prepare_output() {
|
101
|
+
if [ -e "$BASE_DIR/manual_mode" ] || ([ -n "$pid" ] && pgrep -P "$pid" >/dev/null 2>&1); then
|
102
|
+
echo RUNNING
|
103
|
+
else
|
104
|
+
echo "DONE $(cat "$BASE_DIR/exit_code" 2>/dev/null)"
|
105
|
+
fi
|
106
|
+
[ -f "$BASE_DIR/output" ] || exit 0
|
107
|
+
tail --bytes "+${position}" "$BASE_DIR/output" > "$TMP_OUTPUT_FILE"
|
108
|
+
cat "$TMP_OUTPUT_FILE"
|
109
|
+
}
|
110
|
+
|
111
|
+
# prepare the callback payload
|
112
|
+
payload() {
|
113
|
+
if [ -n "$1" ]; then
|
114
|
+
exit_code="$1"
|
115
|
+
else
|
116
|
+
exit_code=null
|
117
|
+
fi
|
118
|
+
|
119
|
+
if [ -e "$BASE_DIR/manual_mode" ]; then
|
120
|
+
manual_mode=true
|
121
|
+
output=$(prepare_output | base64 -w0)
|
122
|
+
else
|
123
|
+
manual_mode=false
|
124
|
+
fi
|
125
|
+
|
126
|
+
echo "{ \"exit_code\": $exit_code,"\
|
127
|
+
" \"step_id\": \"$STEP_ID\","\
|
128
|
+
" \"manual_mode\": $manual_mode,"\
|
129
|
+
" \"output\": \"$output\" }"
|
130
|
+
}
|
131
|
+
|
132
|
+
if [ "$1" = "push_update" ]; then
|
133
|
+
if [ -e "$BASE_DIR/exit_code" ]; then
|
134
|
+
exit_code="$(cat "$BASE_DIR/exit_code")"
|
135
|
+
action="done"
|
136
|
+
else
|
137
|
+
exit_code=""
|
138
|
+
action="update"
|
139
|
+
fi
|
140
|
+
$CURL -X POST -d "$(payload $exit_code)" -u "$AUTH" "$URL_PREFIX"/$action 2>>"$BASE_DIR/curl_stderr"
|
141
|
+
success=$?
|
142
|
+
else
|
143
|
+
prepare_output
|
144
|
+
success=$?
|
145
|
+
fi
|
146
|
+
|
147
|
+
if [ "$success" = 0 ] && [ -e "$TMP_OUTPUT_FILE" ]; then
|
148
|
+
# in case the retrieval was successful, move the position of the cursor to be read next time
|
149
|
+
bytes=$(wc --bytes < "$TMP_OUTPUT_FILE")
|
150
|
+
expr "${position}" + "${bytes}" > "$BASE_DIR/position"
|
151
|
+
fi
|
@@ -0,0 +1,10 @@
|
|
1
|
+
require 'foreman_tasks_core/runner/dispatcher'
|
2
|
+
|
3
|
+
module Proxy::RemoteExecution::Ssh
|
4
|
+
class Dispatcher < ::ForemanTasksCore::Runner::Dispatcher
|
5
|
+
def refresh_interval
|
6
|
+
@refresh_interval ||= Plugin.settings[:runner_refresh_interval] ||
|
7
|
+
Plugin.runner_class::DEFAULT_REFRESH_INTERVAL
|
8
|
+
end
|
9
|
+
end
|
10
|
+
end
|
@@ -0,0 +1,14 @@
|
|
1
|
+
module Proxy::RemoteExecution::Ssh
|
2
|
+
class LogFilter < ::Logger
|
3
|
+
def initialize(base_logger)
|
4
|
+
@base_logger = base_logger
|
5
|
+
end
|
6
|
+
|
7
|
+
def add(severity, *args, &block)
|
8
|
+
severity ||= ::Logger::UNKNOWN
|
9
|
+
return true if @base_logger.nil? || severity < @level
|
10
|
+
|
11
|
+
@base_logger.add(severity, *args, &block)
|
12
|
+
end
|
13
|
+
end
|
14
|
+
end
|
@@ -1,5 +1,7 @@
|
|
1
1
|
module Proxy::RemoteExecution::Ssh
|
2
2
|
class Plugin < Proxy::Plugin
|
3
|
+
SSH_LOG_LEVELS = %w[debug info warn error fatal].freeze
|
4
|
+
|
3
5
|
http_rackup_path File.expand_path("http_config.ru", File.expand_path("../", __FILE__))
|
4
6
|
https_rackup_path File.expand_path("http_config.ru", File.expand_path("../", __FILE__))
|
5
7
|
|
@@ -9,7 +11,11 @@ module Proxy::RemoteExecution::Ssh
|
|
9
11
|
:remote_working_dir => '/var/tmp',
|
10
12
|
:local_working_dir => '/var/tmp',
|
11
13
|
:kerberos_auth => false,
|
12
|
-
:async_ssh => false
|
14
|
+
:async_ssh => false,
|
15
|
+
# When set to nil, makes REX use the runner's default interval
|
16
|
+
# :runner_refresh_interval => nil,
|
17
|
+
:ssh_log_level => :fatal,
|
18
|
+
:cleanup_working_dirs => true
|
13
19
|
|
14
20
|
plugin :ssh, Proxy::RemoteExecution::Ssh::VERSION
|
15
21
|
after_activation do
|
@@ -17,17 +23,27 @@ module Proxy::RemoteExecution::Ssh
|
|
17
23
|
require 'smart_proxy_remote_execution_ssh/version'
|
18
24
|
require 'smart_proxy_remote_execution_ssh/cockpit'
|
19
25
|
require 'smart_proxy_remote_execution_ssh/api'
|
20
|
-
|
21
|
-
|
22
|
-
|
23
|
-
|
24
|
-
|
25
|
-
rescue LoadError # rubocop:disable Lint/HandleExceptions
|
26
|
-
# Dynflow core is not available in the proxy, will be handled
|
27
|
-
# by standalone Dynflow core
|
28
|
-
end
|
26
|
+
require 'smart_proxy_remote_execution_ssh/actions/run_script'
|
27
|
+
require 'smart_proxy_remote_execution_ssh/dispatcher'
|
28
|
+
require 'smart_proxy_remote_execution_ssh/log_filter'
|
29
|
+
require 'smart_proxy_remote_execution_ssh/runners'
|
30
|
+
require 'smart_proxy_dynflow_core'
|
29
31
|
|
30
32
|
Proxy::RemoteExecution::Ssh.validate!
|
31
33
|
end
|
34
|
+
|
35
|
+
def self.simulate?
|
36
|
+
@simulate ||= %w[yes true 1].include? ENV.fetch('REX_SIMULATE', '').downcase
|
37
|
+
end
|
38
|
+
|
39
|
+
def self.runner_class
|
40
|
+
@runner_class ||= if simulate?
|
41
|
+
Runners::FakeScriptRunner
|
42
|
+
elsif settings[:async_ssh]
|
43
|
+
Runners::PollingScriptRunner
|
44
|
+
else
|
45
|
+
Runners::ScriptRunner
|
46
|
+
end
|
47
|
+
end
|
32
48
|
end
|
33
49
|
end
|
@@ -0,0 +1,7 @@
|
|
1
|
+
module Proxy::RemoteExecution::Ssh
|
2
|
+
module Runners
|
3
|
+
require 'smart_proxy_remote_execution_ssh/runners/script_runner'
|
4
|
+
require 'smart_proxy_remote_execution_ssh/runners/polling_script_runner'
|
5
|
+
require 'smart_proxy_remote_execution_ssh/runners/fake_script_runner'
|
6
|
+
end
|
7
|
+
end
|
@@ -0,0 +1,87 @@
|
|
1
|
+
module Proxy::RemoteExecution::Ssh::Runners
|
2
|
+
class FakeScriptRunner < ForemanTasksCore::Runner::Base
|
3
|
+
DEFAULT_REFRESH_INTERVAL = 1
|
4
|
+
|
5
|
+
@data = []
|
6
|
+
|
7
|
+
class << self
|
8
|
+
attr_accessor :data
|
9
|
+
|
10
|
+
def load_data(path = nil)
|
11
|
+
if path.nil?
|
12
|
+
@data = <<-BANNER.gsub(/^\s+\| ?/, '').lines
|
13
|
+
| ====== Simulated Remote Execution ======
|
14
|
+
|
|
15
|
+
| This is an output of a simulated remote
|
16
|
+
| execution run. It should run for about
|
17
|
+
| 5 seconds and finish successfully.
|
18
|
+
BANNER
|
19
|
+
else
|
20
|
+
File.open(File.expand_path(path), 'r') do |f|
|
21
|
+
@data = f.readlines.map(&:chomp)
|
22
|
+
end
|
23
|
+
end
|
24
|
+
@data.freeze
|
25
|
+
end
|
26
|
+
|
27
|
+
def build(options, suspended_action:)
|
28
|
+
new(options, suspended_action: suspended_action)
|
29
|
+
end
|
30
|
+
end
|
31
|
+
|
32
|
+
def initialize(*args)
|
33
|
+
super
|
34
|
+
# Load the fake output the first time its needed
|
35
|
+
self.class.load_data(ENV['REX_SIMULATE_PATH']) unless self.class.data.frozen?
|
36
|
+
@position = 0
|
37
|
+
end
|
38
|
+
|
39
|
+
def start
|
40
|
+
refresh
|
41
|
+
end
|
42
|
+
|
43
|
+
# Do one step
|
44
|
+
def refresh
|
45
|
+
if done?
|
46
|
+
finish
|
47
|
+
else
|
48
|
+
step
|
49
|
+
end
|
50
|
+
end
|
51
|
+
|
52
|
+
def kill
|
53
|
+
finish
|
54
|
+
end
|
55
|
+
|
56
|
+
private
|
57
|
+
|
58
|
+
def finish
|
59
|
+
publish_exit_status exit_code
|
60
|
+
end
|
61
|
+
|
62
|
+
def step
|
63
|
+
publish_data(next_chunk, 'stdout')
|
64
|
+
end
|
65
|
+
|
66
|
+
def done?
|
67
|
+
@position == self.class.data.count
|
68
|
+
end
|
69
|
+
|
70
|
+
def next_chunk
|
71
|
+
output = self.class.data[@position]
|
72
|
+
@position += 1
|
73
|
+
output
|
74
|
+
end
|
75
|
+
|
76
|
+
# Decide if the execution should fail or not
|
77
|
+
def exit_code
|
78
|
+
fail_chance = ENV.fetch('REX_SIMULATE_FAIL_CHANCE', 0).to_i
|
79
|
+
fail_exitcode = ENV.fetch('REX_SIMULATE_EXIT', 0).to_i
|
80
|
+
if fail_exitcode.zero? || fail_chance < (Random.rand * 100).round
|
81
|
+
0
|
82
|
+
else
|
83
|
+
fail_exitcode
|
84
|
+
end
|
85
|
+
end
|
86
|
+
end
|
87
|
+
end
|
@@ -0,0 +1,140 @@
|
|
1
|
+
require 'base64'
|
2
|
+
|
3
|
+
module Proxy::RemoteExecution::Ssh::Runners
|
4
|
+
class PollingScriptRunner < ScriptRunner
|
5
|
+
DEFAULT_REFRESH_INTERVAL = 60
|
6
|
+
|
7
|
+
def self.load_script(name)
|
8
|
+
script_dir = File.expand_path('../async_scripts', __dir__)
|
9
|
+
File.read(File.join(script_dir, name))
|
10
|
+
end
|
11
|
+
|
12
|
+
# The script that controls the flow of the job, able to initiate update or
|
13
|
+
# finish on the task, or take over the control over script lifecycle
|
14
|
+
CONTROL_SCRIPT = load_script('control.sh')
|
15
|
+
|
16
|
+
# The script always outputs at least one line
|
17
|
+
# First line of the output either has to begin with
|
18
|
+
# "RUNNING" or "DONE $EXITCODE"
|
19
|
+
# The following lines are treated as regular output
|
20
|
+
RETRIEVE_SCRIPT = load_script('retrieve.sh')
|
21
|
+
|
22
|
+
def initialize(options, user_method, suspended_action: nil)
|
23
|
+
super(options, user_method, suspended_action: suspended_action)
|
24
|
+
@callback_host = options[:callback_host]
|
25
|
+
@task_id = options[:uuid]
|
26
|
+
@step_id = options[:step_id]
|
27
|
+
@otp = ForemanTasksCore::OtpManager.generate_otp(@task_id)
|
28
|
+
end
|
29
|
+
|
30
|
+
def prepare_start
|
31
|
+
super
|
32
|
+
@base_dir = File.dirname @remote_script
|
33
|
+
upload_control_scripts
|
34
|
+
end
|
35
|
+
|
36
|
+
def initialization_script
|
37
|
+
close_stdin = '</dev/null'
|
38
|
+
close_fds = close_stdin + ' >/dev/null 2>/dev/null'
|
39
|
+
main_script = "(#{@remote_script} #{close_stdin} 2>&1; echo $?>#{@base_dir}/init_exit_code) >#{@base_dir}/output"
|
40
|
+
control_script_finish = "#{@control_script_path} init-script-finish"
|
41
|
+
<<-SCRIPT.gsub(/^ +\| /, '')
|
42
|
+
| export CONTROL_SCRIPT="#{@control_script_path}"
|
43
|
+
| sh -c '#{main_script}; #{control_script_finish}' #{close_fds} &
|
44
|
+
| echo $! > '#{@base_dir}/pid'
|
45
|
+
SCRIPT
|
46
|
+
end
|
47
|
+
|
48
|
+
def trigger(*args)
|
49
|
+
run_sync(*args)
|
50
|
+
end
|
51
|
+
|
52
|
+
def refresh
|
53
|
+
err = output = nil
|
54
|
+
begin
|
55
|
+
_, output, err = run_sync("#{@user_method.cli_command_prefix} #{@retrieval_script}")
|
56
|
+
rescue StandardError => e
|
57
|
+
@logger.info("Error while connecting to the remote host on refresh: #{e.message}")
|
58
|
+
end
|
59
|
+
|
60
|
+
process_retrieved_data(output, err)
|
61
|
+
ensure
|
62
|
+
destroy_session
|
63
|
+
end
|
64
|
+
|
65
|
+
def process_retrieved_data(output, err)
|
66
|
+
return if output.nil? || output.empty?
|
67
|
+
|
68
|
+
lines = output.lines
|
69
|
+
result = lines.shift.match(/^DONE (\d+)?/)
|
70
|
+
publish_data(lines.join, 'stdout') unless lines.empty?
|
71
|
+
publish_data(err, 'stderr') unless err.empty?
|
72
|
+
if result
|
73
|
+
exitcode = result[1] || 0
|
74
|
+
publish_exit_status(exitcode.to_i)
|
75
|
+
cleanup
|
76
|
+
end
|
77
|
+
end
|
78
|
+
|
79
|
+
def external_event(event)
|
80
|
+
data = event.data
|
81
|
+
if data['manual_mode']
|
82
|
+
load_event_updates(data)
|
83
|
+
else
|
84
|
+
# getting the update from automatic mode - reaching to the host to get the latest update
|
85
|
+
return run_refresh
|
86
|
+
end
|
87
|
+
ensure
|
88
|
+
destroy_session
|
89
|
+
end
|
90
|
+
|
91
|
+
def close
|
92
|
+
super
|
93
|
+
ForemanTasksCore::OtpManager.drop_otp(@task_id, @otp) if @otp
|
94
|
+
end
|
95
|
+
|
96
|
+
def upload_control_scripts
|
97
|
+
return if @control_scripts_uploaded
|
98
|
+
|
99
|
+
cp_script_to_remote(env_script, 'env.sh')
|
100
|
+
@control_script_path = cp_script_to_remote(CONTROL_SCRIPT, 'control.sh')
|
101
|
+
@retrieval_script = cp_script_to_remote(RETRIEVE_SCRIPT, 'retrieve.sh')
|
102
|
+
@control_scripts_uploaded = true
|
103
|
+
end
|
104
|
+
|
105
|
+
# Script setting the dynamic values to env variables: it's sourced from other control scripts
|
106
|
+
def env_script
|
107
|
+
<<-SCRIPT.gsub(/^ +\| /, '')
|
108
|
+
| CALLBACK_HOST="#{@callback_host}"
|
109
|
+
| TASK_ID="#{@task_id}"
|
110
|
+
| STEP_ID="#{@step_id}"
|
111
|
+
| OTP="#{@otp}"
|
112
|
+
SCRIPT
|
113
|
+
end
|
114
|
+
|
115
|
+
private
|
116
|
+
|
117
|
+
# Generates updates based on the callback data from the manual mode
|
118
|
+
def load_event_updates(event_data)
|
119
|
+
continuous_output = ForemanTasksCore::ContinuousOutput.new
|
120
|
+
if event_data.key?('output')
|
121
|
+
lines = Base64.decode64(event_data['output']).sub(/\A(RUNNING|DONE).*\n/, '')
|
122
|
+
continuous_output.add_output(lines, 'stdout')
|
123
|
+
end
|
124
|
+
cleanup if event_data['exit_code']
|
125
|
+
new_update(continuous_output, event_data['exit_code'])
|
126
|
+
end
|
127
|
+
|
128
|
+
def cleanup
|
129
|
+
run_sync("rm -rf \"#{remote_command_dir}\"") if @cleanup_working_dirs
|
130
|
+
end
|
131
|
+
|
132
|
+
def destroy_session
|
133
|
+
if @session
|
134
|
+
@logger.debug("Closing session with #{@ssh_user}@#{@host}")
|
135
|
+
@session.close
|
136
|
+
@session = nil
|
137
|
+
end
|
138
|
+
end
|
139
|
+
end
|
140
|
+
end
|
@@ -0,0 +1,469 @@
|
|
1
|
+
require 'net/ssh'
|
2
|
+
require 'fileutils'
|
3
|
+
|
4
|
+
# Rubocop can't make up its mind what it wants
|
5
|
+
# rubocop:disable Lint/SuppressedException, Lint/RedundantCopDisableDirective
|
6
|
+
begin
|
7
|
+
require 'net/ssh/krb'
|
8
|
+
rescue LoadError; end
|
9
|
+
# rubocop:enable Lint/SuppressedException, Lint/RedundantCopDisableDirective
|
10
|
+
|
11
|
+
module Proxy::RemoteExecution::Ssh::Runners
|
12
|
+
class EffectiveUserMethod
|
13
|
+
attr_reader :effective_user, :ssh_user, :effective_user_password, :password_sent
|
14
|
+
|
15
|
+
def initialize(effective_user, ssh_user, effective_user_password)
|
16
|
+
@effective_user = effective_user
|
17
|
+
@ssh_user = ssh_user
|
18
|
+
@effective_user_password = effective_user_password.to_s
|
19
|
+
@password_sent = false
|
20
|
+
end
|
21
|
+
|
22
|
+
def on_data(received_data, ssh_channel)
|
23
|
+
if received_data.match(login_prompt)
|
24
|
+
ssh_channel.send_data(effective_user_password + "\n")
|
25
|
+
@password_sent = true
|
26
|
+
end
|
27
|
+
end
|
28
|
+
|
29
|
+
def filter_password?(received_data)
|
30
|
+
!@effective_user_password.empty? && @password_sent && received_data.match(Regexp.escape(@effective_user_password))
|
31
|
+
end
|
32
|
+
|
33
|
+
def sent_all_data?
|
34
|
+
effective_user_password.empty? || password_sent
|
35
|
+
end
|
36
|
+
|
37
|
+
def reset
|
38
|
+
@password_sent = false
|
39
|
+
end
|
40
|
+
|
41
|
+
def cli_command_prefix; end
|
42
|
+
|
43
|
+
def login_prompt; end
|
44
|
+
end
|
45
|
+
|
46
|
+
class SudoUserMethod < EffectiveUserMethod
|
47
|
+
LOGIN_PROMPT = 'rex login: '.freeze
|
48
|
+
|
49
|
+
def login_prompt
|
50
|
+
LOGIN_PROMPT
|
51
|
+
end
|
52
|
+
|
53
|
+
def cli_command_prefix
|
54
|
+
"sudo -p '#{LOGIN_PROMPT}' -u #{effective_user} "
|
55
|
+
end
|
56
|
+
end
|
57
|
+
|
58
|
+
class DzdoUserMethod < EffectiveUserMethod
|
59
|
+
LOGIN_PROMPT = /password/i.freeze
|
60
|
+
|
61
|
+
def login_prompt
|
62
|
+
LOGIN_PROMPT
|
63
|
+
end
|
64
|
+
|
65
|
+
def cli_command_prefix
|
66
|
+
"dzdo -u #{effective_user} "
|
67
|
+
end
|
68
|
+
end
|
69
|
+
|
70
|
+
class SuUserMethod < EffectiveUserMethod
|
71
|
+
LOGIN_PROMPT = /Password: /i.freeze
|
72
|
+
|
73
|
+
def login_prompt
|
74
|
+
LOGIN_PROMPT
|
75
|
+
end
|
76
|
+
|
77
|
+
def cli_command_prefix
|
78
|
+
"su - #{effective_user} -c "
|
79
|
+
end
|
80
|
+
end
|
81
|
+
|
82
|
+
class NoopUserMethod
|
83
|
+
def on_data(_, _); end
|
84
|
+
|
85
|
+
def filter_password?(received_data)
|
86
|
+
false
|
87
|
+
end
|
88
|
+
|
89
|
+
def sent_all_data?
|
90
|
+
true
|
91
|
+
end
|
92
|
+
|
93
|
+
def cli_command_prefix; end
|
94
|
+
|
95
|
+
def reset; end
|
96
|
+
end
|
97
|
+
|
98
|
+
# rubocop:disable Metrics/ClassLength
|
99
|
+
class ScriptRunner < ForemanTasksCore::Runner::Base
|
100
|
+
attr_reader :execution_timeout_interval
|
101
|
+
|
102
|
+
EXPECTED_POWER_ACTION_MESSAGES = ['restart host', 'shutdown host'].freeze
|
103
|
+
DEFAULT_REFRESH_INTERVAL = 1
|
104
|
+
MAX_PROCESS_RETRIES = 4
|
105
|
+
|
106
|
+
def initialize(options, user_method, suspended_action: nil)
|
107
|
+
super suspended_action: suspended_action
|
108
|
+
@host = options.fetch(:hostname)
|
109
|
+
@script = options.fetch(:script)
|
110
|
+
@ssh_user = options.fetch(:ssh_user, 'root')
|
111
|
+
@ssh_port = options.fetch(:ssh_port, 22)
|
112
|
+
@ssh_password = options.fetch(:secrets, {}).fetch(:ssh_password, nil)
|
113
|
+
@key_passphrase = options.fetch(:secrets, {}).fetch(:key_passphrase, nil)
|
114
|
+
@host_public_key = options.fetch(:host_public_key, nil)
|
115
|
+
@verify_host = options.fetch(:verify_host, nil)
|
116
|
+
@execution_timeout_interval = options.fetch(:execution_timeout_interval, nil)
|
117
|
+
|
118
|
+
@client_private_key_file = settings.ssh_identity_key_file
|
119
|
+
@local_working_dir = options.fetch(:local_working_dir, settings.local_working_dir)
|
120
|
+
@remote_working_dir = options.fetch(:remote_working_dir, settings.remote_working_dir)
|
121
|
+
@cleanup_working_dirs = options.fetch(:cleanup_working_dirs, settings.cleanup_working_dirs)
|
122
|
+
@user_method = user_method
|
123
|
+
end
|
124
|
+
|
125
|
+
def self.build(options, suspended_action:)
|
126
|
+
effective_user = options.fetch(:effective_user, nil)
|
127
|
+
ssh_user = options.fetch(:ssh_user, 'root')
|
128
|
+
effective_user_method = options.fetch(:effective_user_method, 'sudo')
|
129
|
+
|
130
|
+
user_method = if effective_user.nil? || effective_user == ssh_user
|
131
|
+
NoopUserMethod.new
|
132
|
+
elsif effective_user_method == 'sudo'
|
133
|
+
SudoUserMethod.new(effective_user, ssh_user,
|
134
|
+
options.fetch(:secrets, {}).fetch(:effective_user_password, nil))
|
135
|
+
elsif effective_user_method == 'dzdo'
|
136
|
+
DzdoUserMethod.new(effective_user, ssh_user,
|
137
|
+
options.fetch(:secrets, {}).fetch(:effective_user_password, nil))
|
138
|
+
elsif effective_user_method == 'su'
|
139
|
+
SuUserMethod.new(effective_user, ssh_user,
|
140
|
+
options.fetch(:secrets, {}).fetch(:effective_user_password, nil))
|
141
|
+
else
|
142
|
+
raise "effective_user_method '#{effective_user_method}' not supported"
|
143
|
+
end
|
144
|
+
|
145
|
+
new(options, user_method, suspended_action: suspended_action)
|
146
|
+
end
|
147
|
+
|
148
|
+
def start
|
149
|
+
prepare_start
|
150
|
+
script = initialization_script
|
151
|
+
logger.debug("executing script:\n#{indent_multiline(script)}")
|
152
|
+
trigger(script)
|
153
|
+
rescue StandardError => e
|
154
|
+
logger.error("error while initalizing command #{e.class} #{e.message}:\n #{e.backtrace.join("\n")}")
|
155
|
+
publish_exception('Error initializing command', e)
|
156
|
+
end
|
157
|
+
|
158
|
+
def trigger(*args)
|
159
|
+
run_async(*args)
|
160
|
+
end
|
161
|
+
|
162
|
+
def prepare_start
|
163
|
+
@remote_script = cp_script_to_remote
|
164
|
+
@output_path = File.join(File.dirname(@remote_script), 'output')
|
165
|
+
@exit_code_path = File.join(File.dirname(@remote_script), 'exit_code')
|
166
|
+
end
|
167
|
+
|
168
|
+
# the script that initiates the execution
|
169
|
+
def initialization_script
|
170
|
+
su_method = @user_method.instance_of?(SuUserMethod)
|
171
|
+
# pipe the output to tee while capturing the exit code in a file
|
172
|
+
<<-SCRIPT.gsub(/^\s+\| /, '')
|
173
|
+
| sh -c "(#{@user_method.cli_command_prefix}#{su_method ? "'#{@remote_script} < /dev/null '" : "#{@remote_script} < /dev/null"}; echo \\$?>#{@exit_code_path}) | /usr/bin/tee #{@output_path}
|
174
|
+
| exit \\$(cat #{@exit_code_path})"
|
175
|
+
SCRIPT
|
176
|
+
end
|
177
|
+
|
178
|
+
def refresh
|
179
|
+
return if @session.nil?
|
180
|
+
|
181
|
+
with_retries do
|
182
|
+
with_disconnect_handling do
|
183
|
+
@session.process(0)
|
184
|
+
end
|
185
|
+
end
|
186
|
+
ensure
|
187
|
+
check_expecting_disconnect
|
188
|
+
end
|
189
|
+
|
190
|
+
def kill
|
191
|
+
if @session
|
192
|
+
run_sync("pkill -f #{remote_command_file('script')}")
|
193
|
+
else
|
194
|
+
logger.debug('connection closed')
|
195
|
+
end
|
196
|
+
rescue StandardError => e
|
197
|
+
publish_exception('Unexpected error', e, false)
|
198
|
+
end
|
199
|
+
|
200
|
+
def timeout
|
201
|
+
@logger.debug('job timed out')
|
202
|
+
super
|
203
|
+
end
|
204
|
+
|
205
|
+
def timeout_interval
|
206
|
+
execution_timeout_interval
|
207
|
+
end
|
208
|
+
|
209
|
+
# Runs the given block, retrying up to MAX_PROCESS_RETRIES times on
# any StandardError. Each failure is logged; once the attempts are
# exhausted the last error is published instead of being raised.
def with_retries
  attempts = 0
  begin
    yield
  rescue StandardError => error
    logger.error("Unexpected error: #{error.class} #{error.message}\n #{error.backtrace.join("\n")}")
    attempts += 1
    if attempts <= MAX_PROCESS_RETRIES
      logger.error('Retrying')
      retry
    end
    publish_exception('Unexpected error', error)
  end
end
|
224
|
+
|
225
|
+
# Yields, translating a dropped SSH connection into either a clean
# exit (status 0) when the disconnect was expected -- e.g. the script
# rebooted the host -- or a published exception otherwise. The session
# is force-closed in both cases.
def with_disconnect_handling
  yield
rescue IOError, Net::SSH::Disconnect => e
  @session.shutdown!
  # the script may have announced a power action in its output,
  # which flips @expecting_disconnect
  check_expecting_disconnect
  if @expecting_disconnect
    publish_exit_status(0)
  else
    publish_exception('Unexpected disconnect', e)
  end
end
|
236
|
+
|
237
|
+
# Releases all resources held for this job: removes the remote working
# directory (only while the session is still usable and cleanup is
# enabled), then always closes the SSH session and deletes the local
# working directory. Failure to clean the remote side is reported as a
# non-fatal exception.
def close
  run_sync("rm -rf \"#{remote_command_dir}\"") if should_cleanup?
rescue StandardError => e
  publish_exception('Error when removing remote working dir', e, false)
ensure
  @session.close if @session && !@session.closed?
  FileUtils.rm_rf(local_command_dir) if Dir.exist?(local_command_dir) && @cleanup_working_dirs
end
|
245
|
+
|
246
|
+
# Forwards an output chunk to the parent runner, relabelling the bytes
# as UTF-8 first so raw SSH output does not break later serialization.
# NOTE(review): force_encoding only changes the label, it does not
# validate or transcode the bytes -- invalid sequences pass through.
def publish_data(data, type)
  super(data.force_encoding('UTF-8'), type)
end
|
249
|
+
|
250
|
+
private

# Prefixes every line of +string+ with " | " so multi-line script
# bodies stand out when embedded in log messages.
def indent_multiline(string)
  prefixed = []
  string.each_line { |line| prefixed << " | #{line}" }
  prefixed.join
end
|
255
|
+
|
256
|
+
# True only when the session is still open (so the cleanup command can
# actually run) and remote working-dir cleanup is enabled.
def should_cleanup?
  session_usable = @session && !@session.closed?
  session_usable && @cleanup_working_dirs
end
|
259
|
+
|
260
|
+
# Lazily opens (and memoizes) the Net::SSH session to the target host
# using the options assembled by #ssh_options.
def session
  @session ||= begin
    @logger.debug("opening session to #{@ssh_user}@#{@host}")
    Net::SSH.start(@host, @ssh_user, ssh_options)
  end
end
|
266
|
+
|
267
|
+
# Assembles the options hash passed to Net::SSH.start from this
# runner's configuration: port, client key, password/passphrase,
# auth methods, known-hosts handling and logging.
def ssh_options
  ssh_options = {}
  ssh_options[:port] = @ssh_port if @ssh_port
  ssh_options[:keys] = [@client_private_key_file] if @client_private_key_file
  ssh_options[:password] = @ssh_password if @ssh_password
  ssh_options[:passphrase] = @key_passphrase if @key_passphrase
  ssh_options[:keys_only] = true
  # if the host public key is contained in the known_hosts_file,
  # verify it, otherwise, if missing, import it and continue
  # NOTE(review): :paranoid was renamed to :verify_host_key in
  # net-ssh 5 -- confirm the net-ssh version in use still accepts it.
  ssh_options[:paranoid] = true
  ssh_options[:auth_methods] = available_authentication_methods
  # only pin the host key when one was handed to us by the caller
  ssh_options[:user_known_hosts_file] = prepare_known_hosts if @host_public_key
  ssh_options[:number_of_password_prompts] = 1
  ssh_options[:verbose] = settings[:ssh_log_level]
  ssh_options[:logger] = Proxy::RemoteExecution::Ssh::LogFilter.new(SmartProxyDynflowCore::Log.instance)
  return ssh_options
end
|
284
|
+
|
285
|
+
# Convenience accessor for the plugin-level settings.
def settings
  Proxy::RemoteExecution::Ssh::Plugin.settings
end
|
288
|
+
|
289
|
+
# Initiates run of the remote command and yields the data when
# available. The yielding doesn't happen automatically, but as
# part of calling the `refresh` method.
#
# Opens an SSH channel with a PTY, wires callbacks that publish
# stdout/stderr and the exit status (or terminating signal), then
# execs +command+. Blocks only until the exec request is confirmed and
# the user method (e.g. a sudo password prompt) has sent all its data.
def run_async(command)
  raise 'Async command already in progress' if @started

  @started = false
  @user_method.reset

  session.open_channel do |channel|
    channel.request_pty
    channel.on_data do |ch, data|
      # the user method may suppress chunks it filters (password echoes)
      publish_data(data, 'stdout') unless @user_method.filter_password?(data)
      @user_method.on_data(data, ch)
    end
    channel.on_extended_data { |ch, type, data| publish_data(data, 'stderr') }
    # standard exit of the command
    channel.on_request('exit-status') { |ch, data| publish_exit_status(data.read_long) }
    # on signal: sending the signal value (such as 'TERM')
    channel.on_request('exit-signal') do |ch, data|
      publish_exit_status(data.read_string)
      ch.close
      # wait for the channel to finish so that we know at the end
      # that the session is inactive
      ch.wait
    end
    channel.exec(command) do |_, success|
      @started = true
      raise('Error initializing command') unless success
    end
  end
  # pump the event loop until the exec has started and all user-method
  # data (if any) went out
  session.process(0) { !run_started? }
  return true
end
|
323
|
+
|
324
|
+
# True once the exec request was confirmed AND the user method has
# pushed all of its data (e.g. the sudo password) down the channel.
def run_started?
  return @started unless @started
  @user_method.sent_all_data?
end
|
327
|
+
|
328
|
+
# Executes +command+ synchronously over the SSH session, optionally
# feeding +stdin+ to it. Blocks until the channel finishes and returns
# [exit_status, stdout, stderr]. When the command dies on a signal the
# exit status is the signal name String (e.g. 'TERM') instead of an
# Integer.
def run_sync(command, stdin = nil)
  stdout = ''
  stderr = ''
  exit_status = nil
  started = false

  channel = session.open_channel do |ch|
    ch.on_data do |c, data|
      stdout.concat(data)
    end
    ch.on_extended_data { |_, _, data| stderr.concat(data) }
    ch.on_request('exit-status') { |_, data| exit_status = data.read_long }
    # Send data to stdin if we have some
    ch.send_data(stdin) unless stdin.nil?
    # on signal: sending the signal value (such as 'TERM')
    ch.on_request('exit-signal') do |_, data|
      exit_status = data.read_string
      ch.close
      # drain the channel so the session ends up inactive
      ch.wait
    end
    ch.exec command do |_, success|
      raise 'could not execute command' unless success

      started = true
    end
  end
  # pump the event loop until the exec request was confirmed
  session.process(0) { !started }
  # Closing the channel without sending any data gives us SIGPIPE
  channel.close unless stdin.nil?
  channel.wait
  return exit_status, stdout, stderr
end
|
360
|
+
|
361
|
+
# Writes a one-entry known_hosts file pinning the host's public key
# (when one was provided) and returns the local path to it, suitable
# for Net::SSH's :user_known_hosts_file option.
def prepare_known_hosts
  if @host_public_key
    write_command_file_locally('known_hosts', "#{@host} #{@host_public_key}")
  end
  local_command_file('known_hosts')
end
|
368
|
+
|
369
|
+
# Local scratch directory holding this job's generated files.
def local_command_dir
  File.join(@local_working_dir, 'foreman-proxy', 'foreman-ssh-cmd-' + @id.to_s)
end
|
372
|
+
|
373
|
+
# Absolute path of +filename+ inside the local working directory.
def local_command_file(filename)
  dir = local_command_dir
  File.join(dir, filename)
end
|
376
|
+
|
377
|
+
# Working directory on the remote host dedicated to this job.
def remote_command_dir
  subdir = "foreman-ssh-cmd-#{id}"
  File.join(@remote_working_dir, subdir)
end
|
380
|
+
|
381
|
+
# Absolute path of +filename+ inside the remote working directory.
def remote_command_file(filename)
  dir = remote_command_dir
  File.join(dir, filename)
end
|
384
|
+
|
385
|
+
# Makes sure +path+ exists locally as a directory, creating it (and
# parents) when missing. Raises when the path exists but is not a
# directory; returns the path.
def ensure_local_directory(path)
  unless File.exist?(path)
    FileUtils.mkdir_p(path)
    return path
  end
  raise "#{path} expected to be a directory" unless File.directory?(path)
  path
end
|
393
|
+
|
394
|
+
# Uploads the (sanitized) script to the remote working directory with
# execute permissions and returns the remote path it was written to.
def cp_script_to_remote(script = @script, name = 'script')
  target = remote_command_file(name)
  @logger.debug("copying script to #{target}:\n#{indent_multiline(script)}")
  upload_data(sanitize_script(script), target, 555)
end
|
399
|
+
|
400
|
+
# Writes +data+ into +path+ on the remote host and applies
# +permissions+. Returns the remote path; raises when the upload
# command exits non-zero (note: on a signal the status is a String,
# which also satisfies the != 0 check).
def upload_data(data, path, permissions = 555)
  ensure_remote_directory(File.dirname(path))
  # We use tee here to pipe stdin coming from ssh to a file at $path, while silencing its output
  # This is used to write to $path with elevated permissions, solutions using cat and output redirection
  # would not work, because the redirection would happen in the non-elevated shell.
  upload_command = "tee '#{path}' >/dev/null && chmod '#{permissions}' '#{path}'"

  @logger.debug("Sending data to #{path} on remote host:\n#{data}")
  status, _out, err = run_sync(upload_command, data)

  @logger.warn("Output on stderr while uploading #{path}:\n#{err}") unless err.empty?
  raise "Unable to upload file to #{path} on remote system: exit code: #{status}" if status != 0

  path
end
|
417
|
+
|
418
|
+
# Copies a local file to the remote host, preserving its permission
# bits (last three octal digits of the local mode).
def upload_file(local_path, remote_path)
  permissions = File.stat(local_path).mode.to_s(8)[-3..-1]
  @logger.debug("Uploading local file: #{local_path} as #{remote_path} with #{permissions} permissions")
  upload_data(File.read(local_path), remote_path, permissions)
end
|
423
|
+
|
424
|
+
# Creates +path+ (and parents) on the remote host, raising when mkdir
# fails. The path is double-quoted so directories containing spaces or
# shell metacharacters survive, consistent with the quoting already
# used for `rm -rf "..."` in #close.
def ensure_remote_directory(path)
  exit_code, _output, err = run_sync("mkdir -p \"#{path}\"")
  if exit_code != 0
    raise "Unable to create directory on remote system #{path}: exit code: #{exit_code}\n #{err}"
  end
end
|
430
|
+
|
431
|
+
# Strips carriage returns so scripts authored on Windows (CRLF line
# endings) run cleanly on the remote shell.
def sanitize_script(script)
  script.delete("\r")
end
|
434
|
+
|
435
|
+
# Persists +content+ under +filename+ in the local working directory,
# creating the directory when needed; returns the written path.
def write_command_file_locally(filename, content)
  target = local_command_file(filename)
  ensure_local_directory(File.dirname(target))
  File.write(target, content)
  target
end
|
441
|
+
|
442
|
+
# when a remote server disconnects, it's hard to tell if it was on purpose (when calling reboot)
# or it's an error. When it's expected, we expect the script to produce 'restart host' as
# its last command output
#
# Sets @expecting_disconnect when a stdout entry of the continuous
# output begins with one of the EXPECTED_POWER_ACTION_MESSAGES.
# NOTE(review): `find` returns the *first* stdout entry in raw_outputs,
# while the comment above talks about the last output -- confirm the
# ordering of raw_outputs matches the stated intent.
def check_expecting_disconnect
  last_output = @continuous_output.raw_outputs.find { |d| d['output_type'] == 'stdout' }
  return unless last_output

  if EXPECTED_POWER_ACTION_MESSAGES.any? { |message| last_output['output'] =~ /^#{message}/ }
    @expecting_disconnect = true
  end
end
|
453
|
+
|
454
|
+
# Assembles the SSH auth-method list: password first (when one is
# configured), GSSAPI when kerberos is requested and the
# Net::SSH::Kerberos extension is loaded, and public key always last
# as the fallback.
def available_authentication_methods
  methods = ['publickey'] # Always use pubkey auth as fallback
  if settings[:kerberos_auth]
    if defined?(Net::SSH::Kerberos)
      methods.push('gssapi-with-mic')
    else
      @logger.warn('Kerberos authentication requested but not available')
    end
  end
  methods.unshift('password') if @ssh_password
  methods
end
|
467
|
+
end
|
468
|
+
# rubocop:enable Metrics/ClassLength
|
469
|
+
end
|
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: smart_proxy_remote_execution_ssh
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.
|
4
|
+
version: 0.4.0
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Ivan Nečas
|
8
8
|
autorequire:
|
9
9
|
bindir: bin
|
10
10
|
cert_chain: []
|
11
|
-
date:
|
11
|
+
date: 2021-06-07 00:00:00.000000000 Z
|
12
12
|
dependencies:
|
13
13
|
- !ruby/object:Gem::Dependency
|
14
14
|
name: bundler
|
@@ -108,6 +108,20 @@ dependencies:
|
|
108
108
|
- - "~>"
|
109
109
|
- !ruby/object:Gem::Version
|
110
110
|
version: 0.82.0
|
111
|
+
- !ruby/object:Gem::Dependency
|
112
|
+
name: foreman-tasks-core
|
113
|
+
requirement: !ruby/object:Gem::Requirement
|
114
|
+
requirements:
|
115
|
+
- - ">="
|
116
|
+
- !ruby/object:Gem::Version
|
117
|
+
version: 0.3.1
|
118
|
+
type: :runtime
|
119
|
+
prerelease: false
|
120
|
+
version_requirements: !ruby/object:Gem::Requirement
|
121
|
+
requirements:
|
122
|
+
- - ">="
|
123
|
+
- !ruby/object:Gem::Version
|
124
|
+
version: 0.3.1
|
111
125
|
- !ruby/object:Gem::Dependency
|
112
126
|
name: smart_proxy_dynflow
|
113
127
|
requirement: !ruby/object:Gem::Requirement
|
@@ -149,10 +163,19 @@ files:
|
|
149
163
|
- README.md
|
150
164
|
- bundler.plugins.d/remote_execution_ssh.rb
|
151
165
|
- lib/smart_proxy_remote_execution_ssh.rb
|
166
|
+
- lib/smart_proxy_remote_execution_ssh/actions/run_script.rb
|
152
167
|
- lib/smart_proxy_remote_execution_ssh/api.rb
|
168
|
+
- lib/smart_proxy_remote_execution_ssh/async_scripts/control.sh
|
169
|
+
- lib/smart_proxy_remote_execution_ssh/async_scripts/retrieve.sh
|
153
170
|
- lib/smart_proxy_remote_execution_ssh/cockpit.rb
|
171
|
+
- lib/smart_proxy_remote_execution_ssh/dispatcher.rb
|
154
172
|
- lib/smart_proxy_remote_execution_ssh/http_config.ru
|
173
|
+
- lib/smart_proxy_remote_execution_ssh/log_filter.rb
|
155
174
|
- lib/smart_proxy_remote_execution_ssh/plugin.rb
|
175
|
+
- lib/smart_proxy_remote_execution_ssh/runners.rb
|
176
|
+
- lib/smart_proxy_remote_execution_ssh/runners/fake_script_runner.rb
|
177
|
+
- lib/smart_proxy_remote_execution_ssh/runners/polling_script_runner.rb
|
178
|
+
- lib/smart_proxy_remote_execution_ssh/runners/script_runner.rb
|
156
179
|
- lib/smart_proxy_remote_execution_ssh/version.rb
|
157
180
|
- lib/smart_proxy_remote_execution_ssh/webrick_ext.rb
|
158
181
|
- settings.d/remote_execution_ssh.yml.example
|