foreman_remote_execution_core 1.3.1 → 1.5.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: e1576329ece4e20e9506b989b77557686d030c329c71f70386c7415e93a73e24
4
- data.tar.gz: 4021d34ddbaa469bb1f4152c0a3c404c976a97910422a6fd60868661992ab774
3
+ metadata.gz: f9959aa1c94136ee271de608d9f9767f46e0c6d628f8a043420dfac7e54177c8
4
+ data.tar.gz: 7a961ebb1cf1b044e005f43b3ce727b9966fbcf7c17bfcd21307a20db408b662
5
5
  SHA512:
6
- metadata.gz: b0ce710cc252baca86acb51cfe294b76f8de319b5516bc8f303252fff52a8ef1e074c34ea00f7bd179b2811dc85daf292035a2736cb421046aa46e2a37137d73
7
- data.tar.gz: 65092102cb80436b6de9ce165905e8d87c387bf70256b244224d780cd2335f43b0323a06afed5c72e78fa8297566dcbe01f0624dee89b25645d8e2bf292d91a4
6
+ metadata.gz: d3d3cbc724bf1d38f8d9e9a2ef0be35fa604ee8482ec7036a3b6d23cd97b86fb405ff7de4bc9b2c988b8036c6b7ef14d9dcbb2fec8899d14c1668620be8b50fe
7
+ data.tar.gz: 5abf6e164e855b066efdd70bf7b0770acc62ed76aa3c742a28f6b546f79f4042e9e98d4c4f59da06b96bcb66061f124f8a4e7e4b2575057c393dc8ae9a3ea707
@@ -1,82 +1,8 @@
1
- require 'foreman_tasks_core'
2
-
3
1
  module ForemanRemoteExecutionCore
4
- extend ForemanTasksCore::SettingsLoader
5
- register_settings([:remote_execution_ssh, :smart_proxy_remote_execution_ssh_core],
6
- :ssh_identity_key_file => '~/.ssh/id_rsa_foreman_proxy',
7
- :ssh_user => 'root',
8
- :remote_working_dir => '/var/tmp',
9
- :local_working_dir => '/var/tmp',
10
- :kerberos_auth => false,
11
- :async_ssh => false,
12
- # When set to nil, makes REX use the runner's default interval
13
- :runner_refresh_interval => nil,
14
- :ssh_log_level => :fatal,
15
- :cleanup_working_dirs => true)
16
-
17
- SSH_LOG_LEVELS = %w(debug info warn error fatal).freeze
18
-
19
- def self.simulate?
20
- %w(yes true 1).include? ENV.fetch('REX_SIMULATE', '').downcase
21
- end
2
+ require 'smart_proxy_remote_execution_ssh'
3
+ require 'foreman_remote_execution_core/actions'
22
4
 
23
- def self.validate_settings!
24
- super
25
- self.validate_ssh_log_level!
26
- @settings[:ssh_log_level] = @settings[:ssh_log_level].to_sym
5
+ def self.settings
6
+ Proxy::RemoteExecution::Ssh::Plugin.settings
27
7
  end
28
-
29
- def self.validate_ssh_log_level!
30
- wanted_level = @settings[:ssh_log_level].to_s
31
- unless SSH_LOG_LEVELS.include? wanted_level
32
- raise "Wrong value '#{@settings[:ssh_log_level]}' for ssh_log_level, must be one of #{SSH_LOG_LEVELS.join(', ')}"
33
- end
34
-
35
- current = if defined?(::Proxy::SETTINGS)
36
- ::Proxy::SETTINGS.log_level.to_s.downcase
37
- elsif defined?(SmartProxyDynflowCore)
38
- SmartProxyDynflowCore::SETTINGS.log_level.to_s.downcase
39
- else
40
- Rails.configuration.log_level.to_s
41
- end
42
-
43
- # regular log levels correspond to upcased ssh logger levels
44
- ssh, regular = [wanted_level, current].map do |wanted|
45
- SSH_LOG_LEVELS.each_with_index.find { |value, _index| value == wanted }.last
46
- end
47
-
48
- if ssh < regular
49
- raise 'ssh_log_level cannot be more verbose than regular log level'
50
- end
51
- end
52
-
53
- def self.runner_class
54
- @runner_class ||= if simulate?
55
- FakeScriptRunner
56
- elsif settings[:async_ssh]
57
- PollingScriptRunner
58
- else
59
- ScriptRunner
60
- end
61
- end
62
-
63
- if ForemanTasksCore.dynflow_present?
64
- require 'foreman_tasks_core/runner'
65
- require 'foreman_remote_execution_core/log_filter'
66
- if simulate?
67
- # Load the fake implementation of the script runner if simulation is enabled
68
- require 'foreman_remote_execution_core/fake_script_runner'
69
- else
70
- require 'foreman_remote_execution_core/script_runner'
71
- require 'foreman_remote_execution_core/polling_script_runner'
72
- end
73
- require 'foreman_remote_execution_core/dispatcher'
74
- require 'foreman_remote_execution_core/actions'
75
-
76
- if defined?(::SmartProxyDynflowCore)
77
- SmartProxyDynflowCore::TaskLauncherRegistry.register('ssh', ForemanTasksCore::TaskLauncher::Batch)
78
- end
79
- end
80
-
81
- require 'foreman_remote_execution_core/version'
82
8
  end
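
With 1.5.0 the module body above reduces to requiring smart_proxy_remote_execution_ssh and delegating settings to it; the runner, dispatcher and settings code removed further down now lives in that gem. A minimal sketch of reading a setting through the old entry point after this change (the specific keys and access style are assumptions carried over from the 1.3.1 defaults removed above, not something the diff itself guarantees):

    require 'foreman_remote_execution_core'

    # Settings are now served by the smart proxy plugin instead of the
    # SettingsLoader registration that was removed above.
    settings = ForemanRemoteExecutionCore.settings
    ssh_user = settings[:ssh_user]           # was defaulted to 'root' in 1.3.1
    workdir  = settings[:remote_working_dir] # was defaulted to '/var/tmp' in 1.3.1
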
@@ -1,20 +1,6 @@
1
- require 'foreman_tasks_core/shareable_action'
2
-
3
1
  module ForemanRemoteExecutionCore
4
2
  module Actions
5
- class RunScript < ForemanTasksCore::Runner::Action
6
- def initiate_runner
7
- additional_options = {
8
- :step_id => run_step_id,
9
- :uuid => execution_plan_id,
10
- }
11
- ForemanRemoteExecutionCore.runner_class.build(input.merge(additional_options),
12
- suspended_action: suspended_action)
13
- end
14
-
15
- def runner_dispatcher
16
- ForemanRemoteExecutionCore::Dispatcher.instance
17
- end
18
- end
3
+ require 'smart_proxy_remote_execution_ssh/actions/run_script'
4
+ RunScript = Proxy::RemoteExecution::Ssh::Actions::RunScript
19
5
  end
20
6
  end
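
The Actions module now simply aliases the class shipped by smart_proxy_remote_execution_ssh, so code that still references the old constant keeps working. A hypothetical check (not part of the gem) illustrating the effect of that aliasing:

    require 'foreman_remote_execution_core/actions'

    # Both names resolve to the very same class object after the assignment above.
    ForemanRemoteExecutionCore::Actions::RunScript.equal?(
      Proxy::RemoteExecution::Ssh::Actions::RunScript
    ) # => true
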
@@ -1,3 +1,3 @@
1
1
  module ForemanRemoteExecutionCore
2
- VERSION = '1.3.1'.freeze
2
+ VERSION = '1.5.0'.freeze
3
3
  end
metadata CHANGED
@@ -1,15 +1,43 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: foreman_remote_execution_core
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.3.1
4
+ version: 1.5.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Ivan Nečas
8
- autorequire:
8
+ autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2020-07-14 00:00:00.000000000 Z
11
+ date: 2021-06-07 00:00:00.000000000 Z
12
12
  dependencies:
13
+ - !ruby/object:Gem::Dependency
14
+ name: bcrypt_pbkdf
15
+ requirement: !ruby/object:Gem::Requirement
16
+ requirements:
17
+ - - ">="
18
+ - !ruby/object:Gem::Version
19
+ version: '0'
20
+ type: :runtime
21
+ prerelease: false
22
+ version_requirements: !ruby/object:Gem::Requirement
23
+ requirements:
24
+ - - ">="
25
+ - !ruby/object:Gem::Version
26
+ version: '0'
27
+ - !ruby/object:Gem::Dependency
28
+ name: ed25519
29
+ requirement: !ruby/object:Gem::Requirement
30
+ requirements:
31
+ - - ">="
32
+ - !ruby/object:Gem::Version
33
+ version: '0'
34
+ type: :runtime
35
+ prerelease: false
36
+ version_requirements: !ruby/object:Gem::Requirement
37
+ requirements:
38
+ - - ">="
39
+ - !ruby/object:Gem::Version
40
+ version: '0'
13
41
  - !ruby/object:Gem::Dependency
14
42
  name: foreman-tasks-core
15
43
  requirement: !ruby/object:Gem::Requirement
@@ -38,6 +66,20 @@ dependencies:
38
66
  - - ">="
39
67
  - !ruby/object:Gem::Version
40
68
  version: '0'
69
+ - !ruby/object:Gem::Dependency
70
+ name: smart_proxy_remote_execution_ssh
71
+ requirement: !ruby/object:Gem::Requirement
72
+ requirements:
73
+ - - ">="
74
+ - !ruby/object:Gem::Version
75
+ version: 0.4.0
76
+ type: :runtime
77
+ prerelease: false
78
+ version_requirements: !ruby/object:Gem::Requirement
79
+ requirements:
80
+ - - ">="
81
+ - !ruby/object:Gem::Version
82
+ version: 0.4.0
41
83
  description: " Ssh remote execution provider code sharable between Foreman and Foreman-Proxy\n"
42
84
  email:
43
85
  - inecas@redhat.com
@@ -48,19 +90,12 @@ files:
48
90
  - LICENSE
49
91
  - lib/foreman_remote_execution_core.rb
50
92
  - lib/foreman_remote_execution_core/actions.rb
51
- - lib/foreman_remote_execution_core/async_scripts/control.sh
52
- - lib/foreman_remote_execution_core/async_scripts/retrieve.sh
53
- - lib/foreman_remote_execution_core/dispatcher.rb
54
- - lib/foreman_remote_execution_core/fake_script_runner.rb
55
- - lib/foreman_remote_execution_core/log_filter.rb
56
- - lib/foreman_remote_execution_core/polling_script_runner.rb
57
- - lib/foreman_remote_execution_core/script_runner.rb
58
93
  - lib/foreman_remote_execution_core/version.rb
59
94
  homepage: https://github.com/theforeman/foreman_remote_execution
60
95
  licenses:
61
96
  - GPL-3.0
62
97
  metadata: {}
63
- post_install_message:
98
+ post_install_message:
64
99
  rdoc_options: []
65
100
  require_paths:
66
101
  - lib
@@ -75,8 +110,8 @@ required_rubygems_version: !ruby/object:Gem::Requirement
75
110
  - !ruby/object:Gem::Version
76
111
  version: '0'
77
112
  requirements: []
78
- rubygems_version: 3.0.3
79
- signing_key:
113
+ rubygems_version: 3.1.2
114
+ signing_key:
80
115
  specification_version: 4
81
116
  summary: Foreman remote execution - core bits
82
117
  test_files: []
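
The metadata above records three new runtime dependencies: bcrypt_pbkdf and ed25519 (the gems net-ssh needs for Ed25519 key support) and smart_proxy_remote_execution_ssh >= 0.4.0, which now carries the runner code removed below. A sketch of the equivalent gemspec declarations (only the names and version constraints come from the diff; the surrounding specification block is illustrative):

    Gem::Specification.new do |spec|
      spec.name    = 'foreman_remote_execution_core'
      spec.version = '1.5.0'
      # new runtime dependencies recorded in the metadata above
      spec.add_runtime_dependency 'bcrypt_pbkdf', '>= 0'
      spec.add_runtime_dependency 'ed25519', '>= 0'
      spec.add_runtime_dependency 'smart_proxy_remote_execution_ssh', '>= 0.4.0'
    end
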
@@ -1,110 +0,0 @@
1
- #!/bin/sh
2
- #
3
- # Control script for the remote execution jobs.
4
- #
5
- # The initial script calls `$CONTROL_SCRIPT init-script-finish` once the original script exits.
6
- # In automatic mode, the exit code is sent back to the proxy on `init-script-finish`.
7
- #
8
- # What the script provides is also a manual mode, where the author of the rex script can take
9
- # full control of the job lifecycle. This allows keeping the job marked as running even when
10
- # the initial script finishes.
11
- #
12
- # The manual mode is turned on by calling `$CONTROL_SCRIPT manual-mode`. After calling this,
13
- # one can call `echo message | $CONTROL_SCRIPT update` to send output to the remote execution jobs
14
15
- # and `$CONTROL_SCRIPT finish 0` once finished (with 0 as exit code)
16
- BASE_DIR="$(dirname "$(readlink -f "$0")")"
17
-
18
- if ! command -v curl >/dev/null; then
19
- echo 'curl is required' >&2
20
- exit 1
21
- fi
22
-
23
- # send the callback data to proxy
24
- update() {
25
- "$BASE_DIR/retrieve.sh" push_update
26
- }
27
-
28
- # wait for named pipe $1 to retrieve data. If $2 is provided, it serves as timeout
29
- # in seconds on how long to wait when reading.
30
- wait_for_pipe() {
31
- pipe_path=$1
32
- if [ -n "$2" ]; then
33
- timeout="-t $2"
34
- fi
35
- if read $timeout <>"$pipe_path"; then
36
- rm "$pipe_path"
37
- return 0
38
- else
39
- return 1
40
- fi
41
- }
42
-
43
- # function run in background, when receiving update data via STDIN.
44
- periodic_update() {
45
- interval=1
46
- # reading some data from periodic_update_control signals we're done
47
- while ! wait_for_pipe "$BASE_DIR/periodic_update_control" "$interval"; do
48
- update
49
- done
50
- # one more update before we finish
51
- update
52
- # signal the main process that we are finished
53
- echo > "$BASE_DIR/periodic_update_finished"
54
- }
55
-
56
- # signal the periodic_update process that the main process is finishing
57
- periodic_update_finish() {
58
- if [ -e "$BASE_DIR/periodic_update_control" ]; then
59
- echo > "$BASE_DIR/periodic_update_control"
60
- fi
61
- }
62
-
63
- ACTION=${1:-finish}
64
-
65
- case "$ACTION" in
66
- init-script-finish)
67
- if ! [ -e "$BASE_DIR/manual_mode" ]; then
68
- # make the exit code of initialization script the exit code of the whole job
69
- cp init_exit_code exit_code
70
- update
71
- fi
72
- ;;
73
- finish)
74
- # take exit code passed via the command line, with fallback
75
- # to the exit code of the initialization script
76
- exit_code=${2:-$(cat "$BASE_DIR/init_exit_code")}
77
- echo $exit_code > "$BASE_DIR/exit_code"
78
- update
79
- if [ -e "$BASE_DIR/manual_mode" ]; then
80
- rm "$BASE_DIR/manual_mode"
81
- fi
82
- ;;
83
- update)
84
- # read data from input when redirected through a pipe
85
- if ! [ -t 0 ]; then
86
- # couple of named pipes to coordinate the main process with the periodic_update
87
- mkfifo "$BASE_DIR/periodic_update_control"
88
- mkfifo "$BASE_DIR/periodic_update_finished"
89
- trap "periodic_update_finish" EXIT
90
- # run periodic update as separate process to keep sending updates in output to server
91
- periodic_update &
92
- # redirect the input into output
93
- tee -a "$BASE_DIR/output"
94
- periodic_update_finish
95
- # ensure the periodic update finished before we return
96
- wait_for_pipe "$BASE_DIR/periodic_update_finished"
97
- else
98
- update
99
- fi
100
- ;;
101
- # mark the script to be in manual mode: this means the script author needs to use `update` and `finish`
102
- # commands to send output to the remote execution job or mark it as finished.
103
- manual-mode)
104
- touch "$BASE_DIR/manual_mode"
105
- ;;
106
- *)
107
- echo "Unknown action $ACTION"
108
- exit 1
109
- ;;
110
- esac
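
The removed control.sh implements the job-control protocol described in its header comments. A hypothetical fragment of a rex script using the manual mode (illustrative only; $CONTROL_SCRIPT is exported by the initialization script that PollingScriptRunner, removed further below, generates):

    # take over the job lifecycle so the job stays marked as running
    "$CONTROL_SCRIPT" manual-mode
    # stream intermediate output back to the remote execution job
    echo "provisioning started" | "$CONTROL_SCRIPT" update
    # ... long-running work ...
    # report completion with exit code 0
    "$CONTROL_SCRIPT" finish 0
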
@@ -1,151 +0,0 @@
1
- #!/bin/sh
2
-
3
- if ! pgrep --help 2>/dev/null >/dev/null; then
4
- echo DONE 1
5
- echo "pgrep is required" >&2
6
- exit 1
7
- fi
8
-
9
- BASE_DIR="$(dirname "$(readlink -f "$0")")"
10
-
11
- # load the data required for generating the callback
12
- . "$BASE_DIR/env.sh"
13
- URL_PREFIX="$CALLBACK_HOST/dynflow/tasks/$TASK_ID"
14
- AUTH="$TASK_ID:$OTP"
15
- CURL="curl --silent --show-error --fail --max-time 10"
16
-
17
- MY_LOCK_FILE="$BASE_DIR/retrieve_lock.$$"
18
- MY_PID=$$
19
- echo $MY_PID >"$MY_LOCK_FILE"
20
- LOCK_FILE="$BASE_DIR/retrieve_lock"
21
- TMP_OUTPUT_FILE="$BASE_DIR/tmp_output"
22
-
23
- RUN_TIMEOUT=30 # for how long can the script hold the lock
24
- WAIT_TIMEOUT=60 # for how long the script is trying to acquire the lock
25
- START_TIME=$(date +%s)
26
-
27
- fail() {
28
- echo RUNNING
29
- echo "$1"
30
- exit 1
31
- }
32
-
33
- acquire_lock() {
34
- # try to acquire lock by creating the file (ln should be atomic and fail in case
35
- # another process succeeded first). We also check the content of the lock file,
36
- # in case our process won when competing over the lock while invalidating
37
- # the lock on timeout.
38
- ln "$MY_LOCK_FILE" "$LOCK_FILE" 2>/dev/null || [ "$(head -n1 "$LOCK_FILE")" = "$MY_PID" ]
39
- return $?
40
- }
41
-
42
- # acquiring the lock before proceeding, to ensure only one instance of the script is running
43
- while ! acquire_lock; do
44
- # we failed to create retrieve_lock - assuming there is already another retrieve script running
45
- current_pid=$(head -n1 "$LOCK_FILE")
46
- if [ -z "$current_pid" ]; then
47
- continue
48
- fi
49
- # check whether the lock is not too old (compared to $RUN_TIMEOUT) and try to kill
50
- # if it is, so that we don't have a stalled process here
51
- lock_lines_count=$(wc -l < "$LOCK_FILE")
52
- current_lock_time=$(stat --format "%Y" "$LOCK_FILE")
53
- current_time=$(date +%s)
54
-
55
- if [ "$(( current_time - START_TIME ))" -gt "$WAIT_TIMEOUT" ]; then
56
- # We were waiting for the lock for too long - just give up
57
- fail "Wait time exceeded $WAIT_TIMEOUT"
58
- elif [ "$(( current_time - current_lock_time ))" -gt "$RUN_TIMEOUT" ]; then
59
- # The previous lock was held for too long - re-acquiring procedure
60
- if [ "$lock_lines_count" -gt 1 ]; then
61
- # there were multiple processes waiting for lock without resolution
62
- # longer than the $RUN_TIMEOUT - we reset the lock file and let processes
63
- # compete
64
- echo "RETRY" > "$LOCK_FILE"
65
- fi
66
- if [ "$current_pid" != "RETRY" ]; then
67
- # try to kill the currently stalled process
68
- kill -9 "$current_pid" 2>/dev/null
69
- fi
70
- # try to add our process as one candidate
71
- echo $MY_PID >> "$LOCK_FILE"
72
- if [ "$( head -n2 "$LOCK_FILE" | tail -n1 )" = "$MY_PID" ]; then
73
- # our process won the competition for the new lock: it is the first pid
74
- # after the original one in the lock file - take ownership of the lock
75
- # next iteration only this process will get through
76
- echo $MY_PID >"$LOCK_FILE"
77
- fi
78
- else
79
- # still waiting for the original owner to finish
80
- sleep 1
81
- fi
82
- done
83
-
84
- release_lock() {
85
- rm "$MY_LOCK_FILE"
86
- rm "$LOCK_FILE"
87
- }
88
- # ensure we release the lock at exit
89
- trap "release_lock" EXIT
90
-
91
- # make sure we clear previous tmp output file
92
- if [ -e "$TMP_OUTPUT_FILE" ]; then
93
- rm "$TMP_OUTPUT_FILE"
94
- fi
95
-
96
- pid=$(cat "$BASE_DIR/pid")
97
- [ -f "$BASE_DIR/position" ] || echo 1 > "$BASE_DIR/position"
98
- position=$(cat "$BASE_DIR/position")
99
-
100
- prepare_output() {
101
- if [ -e "$BASE_DIR/manual_mode" ] || ([ -n "$pid" ] && pgrep -P "$pid" >/dev/null 2>&1); then
102
- echo RUNNING
103
- else
104
- echo "DONE $(cat "$BASE_DIR/exit_code" 2>/dev/null)"
105
- fi
106
- [ -f "$BASE_DIR/output" ] || exit 0
107
- tail --bytes "+${position}" "$BASE_DIR/output" > "$TMP_OUTPUT_FILE"
108
- cat "$TMP_OUTPUT_FILE"
109
- }
110
-
111
- # prepare the callback payload
112
- payload() {
113
- if [ -n "$1" ]; then
114
- exit_code="$1"
115
- else
116
- exit_code=null
117
- fi
118
-
119
- if [ -e "$BASE_DIR/manual_mode" ]; then
120
- manual_mode=true
121
- output=$(prepare_output | base64 -w0)
122
- else
123
- manual_mode=false
124
- fi
125
-
126
- echo "{ \"exit_code\": $exit_code,"\
127
- " \"step_id\": \"$STEP_ID\","\
128
- " \"manual_mode\": $manual_mode,"\
129
- " \"output\": \"$output\" }"
130
- }
131
-
132
- if [ "$1" = "push_update" ]; then
133
- if [ -e "$BASE_DIR/exit_code" ]; then
134
- exit_code="$(cat "$BASE_DIR/exit_code")"
135
- action="done"
136
- else
137
- exit_code=""
138
- action="update"
139
- fi
140
- $CURL -X POST -d "$(payload $exit_code)" -u "$AUTH" "$URL_PREFIX"/$action 2>>"$BASE_DIR/curl_stderr"
141
- success=$?
142
- else
143
- prepare_output
144
- success=$?
145
- fi
146
-
147
- if [ "$success" = 0 ] && [ -e "$TMP_OUTPUT_FILE" ]; then
148
- # in case the retrieval was successful, move the position of the cursor to be read next time
149
- bytes=$(wc --bytes < "$TMP_OUTPUT_FILE")
150
- expr "${position}" + "${bytes}" > "$BASE_DIR/position"
151
- fi
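
The removed retrieve.sh reports status and output back to the proxy over the Dynflow task callback URL. Roughly what its push_update branch sends while a job is still running (placeholder values; the URL shape, authentication and field names come from the script above):

    curl --silent --show-error --fail --max-time 10 \
      -X POST \
      -u "$TASK_ID:$OTP" \
      -d '{ "exit_code": null, "step_id": "1", "manual_mode": false, "output": "" }' \
      "$CALLBACK_HOST/dynflow/tasks/$TASK_ID/update"
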
@@ -1,12 +0,0 @@
1
- require 'foreman_tasks_core/runner/dispatcher'
2
-
3
- module ForemanRemoteExecutionCore
4
- class Dispatcher < ::ForemanTasksCore::Runner::Dispatcher
5
-
6
- def refresh_interval
7
- @refresh_interval ||= ForemanRemoteExecutionCore.settings[:runner_refresh_interval] ||
8
- ForemanRemoteExecutionCore.runner_class::DEFAULT_REFRESH_INTERVAL
9
- end
10
-
11
- end
12
- end
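
The removed dispatcher only customised the polling cadence: an explicit runner_refresh_interval setting wins, otherwise the runner class's own default applies. A one-line illustration of that precedence (the nil default and the 60-second constant both appear elsewhere in this diff):

    # nil (the 1.3.1 default) falls through to the runner's constant, e.g. 60 s for PollingScriptRunner
    interval = settings[:runner_refresh_interval] || runner_class::DEFAULT_REFRESH_INTERVAL
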
@@ -1,87 +0,0 @@
1
- module ForemanRemoteExecutionCore
2
- class FakeScriptRunner < ForemanTasksCore::Runner::Base
3
- DEFAULT_REFRESH_INTERVAL = 1
4
-
5
- @data = []
6
-
7
- class << self
8
- attr_accessor :data
9
-
10
- def load_data(path = nil)
11
- if path.nil?
12
- @data = <<-END.gsub(/^\s+\| ?/, '').lines
13
- | ====== Simulated Remote Execution ======
14
- |
15
- | This is an output of a simulated remote
16
- | execution run. It should run for about
17
- | 5 seconds and finish successfully.
18
- END
19
- else
20
- File.open(File.expand_path(path), 'r') do |f|
21
- @data = f.readlines.map(&:chomp)
22
- end
23
- end
24
- @data.freeze
25
- end
26
-
27
- def build(options, suspended_action:)
28
- new(options, suspended_action: suspended_action)
29
- end
30
- end
31
-
32
- def initialize(*args)
33
- super
34
- # Load the fake output the first time its needed
35
- self.class.load_data(ENV['REX_SIMULATE_PATH']) unless self.class.data.frozen?
36
- @position = 0
37
- end
38
-
39
- def start
40
- refresh
41
- end
42
-
43
- # Do one step
44
- def refresh
45
- if done?
46
- finish
47
- else
48
- step
49
- end
50
- end
51
-
52
- def kill
53
- finish
54
- end
55
-
56
- private
57
-
58
- def finish
59
- publish_exit_status exit_code
60
- end
61
-
62
- def step
63
- publish_data(next_chunk, 'stdout')
64
- end
65
-
66
- def done?
67
- @position == self.class.data.count
68
- end
69
-
70
- def next_chunk
71
- output = self.class.data[@position]
72
- @position += 1
73
- output
74
- end
75
-
76
- # Decide if the execution should fail or not
77
- def exit_code
78
- fail_chance = ENV.fetch('REX_SIMULATE_FAIL_CHANCE', 0).to_i
79
- fail_exitcode = ENV.fetch('REX_SIMULATE_EXIT', 0).to_i
80
- if fail_exitcode == 0 || fail_chance < (Random.rand * 100).round
81
- 0
82
- else
83
- fail_exitcode
84
- end
85
- end
86
- end
87
- end
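
The removed FakeScriptRunner, together with the simulate? helper removed from the main file above, allowed exercising remote execution without touching any host, driven purely by environment variables. An illustrative environment (how the proxy process itself is started is outside this diff):

    export REX_SIMULATE=true                 # runner_class then returns FakeScriptRunner
    export REX_SIMULATE_PATH=/tmp/fake.txt   # optional canned output, loaded on first use
    export REX_SIMULATE_FAIL_CHANCE=25       # percentage chance of a non-zero exit
    export REX_SIMULATE_EXIT=3               # exit code used when the simulated run fails
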
@@ -1,14 +0,0 @@
1
- module ForemanRemoteExecutionCore
2
- class LogFilter < ::Logger
3
- def initialize(base_logger)
4
- @base_logger = base_logger
5
- end
6
-
7
- def add(severity, *args, &block)
8
- severity ||= ::Logger::UNKNOWN
9
- return true if @base_logger.nil? || severity < @level
10
-
11
- @base_logger.add(severity, *args, &block)
12
- end
13
- end
14
- end
@@ -1,136 +0,0 @@
1
- require 'base64'
2
-
3
- module ForemanRemoteExecutionCore
4
- class PollingScriptRunner < ScriptRunner
5
-
6
- DEFAULT_REFRESH_INTERVAL = 60
7
-
8
- def self.load_script(name)
9
- script_dir = File.expand_path('../async_scripts', __FILE__)
10
- File.read(File.join(script_dir, name))
11
- end
12
-
13
- # The script that controls the flow of the job, able to initiate update or
14
- # finish on the task, or take over the control over script lifecycle
15
- CONTROL_SCRIPT = load_script('control.sh')
16
-
17
- # The script always outputs at least one line
18
- # First line of the output either has to begin with
19
- # "RUNNING" or "DONE $EXITCODE"
20
- # The following lines are treated as regular output
21
- RETRIEVE_SCRIPT = load_script('retrieve.sh')
22
-
23
- def initialize(options, user_method, suspended_action: nil)
24
- super(options, user_method, suspended_action: suspended_action)
25
- @callback_host = options[:callback_host]
26
- @task_id = options[:uuid]
27
- @step_id = options[:step_id]
28
- @otp = ForemanTasksCore::OtpManager.generate_otp(@task_id)
29
- end
30
-
31
- def prepare_start
32
- super
33
- @base_dir = File.dirname @remote_script
34
- upload_control_scripts
35
- end
36
-
37
- def initialization_script
38
- close_stdin = '</dev/null'
39
- close_fds = close_stdin + ' >/dev/null 2>/dev/null'
40
- main_script = "(#{@remote_script} #{close_stdin} 2>&1; echo $?>#{@base_dir}/init_exit_code) >#{@base_dir}/output"
41
- control_script_finish = "#{@control_script_path} init-script-finish"
42
- <<-SCRIPT.gsub(/^ +\| /, '')
43
- | export CONTROL_SCRIPT="#{@control_script_path}"
44
- | sh -c '#{main_script}; #{control_script_finish}' #{close_fds} &
45
- | echo $! > '#{@base_dir}/pid'
46
- SCRIPT
47
- end
48
-
49
- def trigger(*args)
50
- run_sync(*args)
51
- end
52
-
53
- def refresh
54
- err = output = nil
55
- begin
56
- _, output, err = run_sync("#{@user_method.cli_command_prefix} #{@retrieval_script}")
57
- rescue => e
58
- @logger.info("Error while connecting to the remote host on refresh: #{e.message}")
59
- end
60
- return if output.nil? || output.empty?
61
-
62
- lines = output.lines
63
- result = lines.shift.match(/^DONE (\d+)?/)
64
- publish_data(lines.join, 'stdout') unless lines.empty?
65
- publish_data(err, 'stderr') unless err.empty?
66
- if result
67
- exitcode = result[1] || 0
68
- publish_exit_status(exitcode.to_i)
69
- cleanup
70
- end
71
- ensure
72
- destroy_session
73
- end
74
-
75
- def external_event(event)
76
- data = event.data
77
- if data['manual_mode']
78
- load_event_updates(data)
79
- else
80
- # getting the update from automatic mode - reaching to the host to get the latest update
81
- return run_refresh
82
- end
83
- ensure
84
- destroy_session
85
- end
86
-
87
- def close
88
- super
89
- ForemanTasksCore::OtpManager.drop_otp(@task_id, @otp) if @otp
90
- end
91
-
92
- def upload_control_scripts
93
- return if @control_scripts_uploaded
94
-
95
- cp_script_to_remote(env_script, 'env.sh')
96
- @control_script_path = cp_script_to_remote(CONTROL_SCRIPT, 'control.sh')
97
- @retrieval_script = cp_script_to_remote(RETRIEVE_SCRIPT, 'retrieve.sh')
98
- @control_scripts_uploaded = true
99
- end
100
-
101
- # Script setting the dynamic values to env variables: it's sourced from other control scripts
102
- def env_script
103
- <<-SCRIPT.gsub(/^ +\| /, '')
104
- | CALLBACK_HOST="#{@callback_host}"
105
- | TASK_ID="#{@task_id}"
106
- | STEP_ID="#{@step_id}"
107
- | OTP="#{@otp}"
108
- SCRIPT
109
- end
110
-
111
- private
112
-
113
- # Generates updates based on the callback data from the manual mode
114
- def load_event_updates(event_data)
115
- continuous_output = ForemanTasksCore::ContinuousOutput.new
116
- if event_data.key?('output')
117
- lines = Base64.decode64(event_data['output']).sub(/\A(RUNNING|DONE).*\n/, '')
118
- continuous_output.add_output(lines, 'stdout')
119
- end
120
- cleanup if event_data['exit_code']
121
- new_update(continuous_output, event_data['exit_code'])
122
- end
123
-
124
- def cleanup
125
- run_sync("rm -rf \"#{remote_command_dir}\"") if @cleanup_working_dirs
126
- end
127
-
128
- def destroy_session
129
- if @session
130
- @logger.debug("Closing session with #{@ssh_user}@#{@host}")
131
- @session.close
132
- @session = nil
133
- end
134
- end
135
- end
136
- end
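
PollingScriptRunner#refresh above parses whatever retrieve.sh prints: the first line carries the status, everything after it is treated as captured output. Illustrative shapes of the two possible responses (the output lines are made up):

    RUNNING
    ...output produced since the last poll...

    DONE 0
    ...final chunk of output...
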
@@ -1,478 +0,0 @@
1
- require 'net/ssh'
2
- require 'fileutils'
3
-
4
- # rubocop:disable Lint/SuppressedException
5
- begin
6
- require 'net/ssh/krb'
7
- rescue LoadError; end
8
- # rubocop:enable Lint/SuppressedException:
9
-
10
- module ForemanRemoteExecutionCore
11
- class SudoUserMethod
12
- LOGIN_PROMPT = 'rex login: '.freeze
13
-
14
- attr_reader :effective_user, :ssh_user, :effective_user_password, :password_sent
15
-
16
- def initialize(effective_user, ssh_user, effective_user_password)
17
- @effective_user = effective_user
18
- @ssh_user = ssh_user
19
- @effective_user_password = effective_user_password.to_s
20
- @password_sent = false
21
- end
22
-
23
- def on_data(received_data, ssh_channel)
24
- if received_data.match(login_prompt)
25
- ssh_channel.send_data(effective_user_password + "\n")
26
- @password_sent = true
27
- end
28
- end
29
-
30
- def login_prompt
31
- LOGIN_PROMPT
32
- end
33
-
34
- def filter_password?(received_data)
35
- !@effective_user_password.empty? && @password_sent && received_data.match(Regexp.escape(@effective_user_password))
36
- end
37
-
38
- def sent_all_data?
39
- effective_user_password.empty? || password_sent
40
- end
41
-
42
- def cli_command_prefix
43
- "sudo -p '#{LOGIN_PROMPT}' -u #{effective_user} "
44
- end
45
-
46
- def reset
47
- @password_sent = false
48
- end
49
- end
50
-
51
- class DzdoUserMethod < SudoUserMethod
52
- LOGIN_PROMPT = /password/i.freeze
53
-
54
- def login_prompt
55
- LOGIN_PROMPT
56
- end
57
-
58
- def cli_command_prefix
59
- "dzdo -u #{effective_user} "
60
- end
61
- end
62
-
63
- class SuUserMethod
64
- attr_accessor :effective_user, :ssh_user
65
-
66
- def initialize(effective_user, ssh_user)
67
- @effective_user = effective_user
68
- @ssh_user = ssh_user
69
- end
70
-
71
- def on_data(_, _)
72
- end
73
-
74
- def filter_password?(received_data)
75
- false
76
- end
77
-
78
- def sent_all_data?
79
- true
80
- end
81
-
82
- def cli_command_prefix
83
- "su - #{effective_user} -c "
84
- end
85
-
86
- def reset
87
- end
88
- end
89
-
90
- class NoopUserMethod
91
- def on_data(_, _)
92
- end
93
-
94
- def filter_password?(received_data)
95
- false
96
- end
97
-
98
- def sent_all_data?
99
- true
100
- end
101
-
102
- def cli_command_prefix
103
- end
104
-
105
- def reset
106
- end
107
- end
108
-
109
- class ScriptRunner < ForemanTasksCore::Runner::Base
110
- attr_reader :execution_timeout_interval
111
-
112
- EXPECTED_POWER_ACTION_MESSAGES = ['restart host', 'shutdown host'].freeze
113
- DEFAULT_REFRESH_INTERVAL = 1
114
- MAX_PROCESS_RETRIES = 4
115
-
116
- def initialize(options, user_method, suspended_action: nil)
117
- super suspended_action: suspended_action
118
- @host = options.fetch(:hostname)
119
- @script = options.fetch(:script)
120
- @ssh_user = options.fetch(:ssh_user, 'root')
121
- @ssh_port = options.fetch(:ssh_port, 22)
122
- @ssh_password = options.fetch(:secrets, {}).fetch(:ssh_password, nil)
123
- @key_passphrase = options.fetch(:secrets, {}).fetch(:key_passphrase, nil)
124
- @host_public_key = options.fetch(:host_public_key, nil)
125
- @verify_host = options.fetch(:verify_host, nil)
126
- @execution_timeout_interval = options.fetch(:execution_timeout_interval, nil)
127
-
128
- @client_private_key_file = settings.fetch(:ssh_identity_key_file)
129
- @local_working_dir = options.fetch(:local_working_dir, settings.fetch(:local_working_dir))
130
- @remote_working_dir = options.fetch(:remote_working_dir, settings.fetch(:remote_working_dir))
131
- @cleanup_working_dirs = options.fetch(:cleanup_working_dirs, settings.fetch(:cleanup_working_dirs))
132
- @user_method = user_method
133
- end
134
-
135
- def self.build(options, suspended_action:)
136
- effective_user = options.fetch(:effective_user, nil)
137
- ssh_user = options.fetch(:ssh_user, 'root')
138
- effective_user_method = options.fetch(:effective_user_method, 'sudo')
139
-
140
- user_method = if effective_user.nil? || effective_user == ssh_user
141
- NoopUserMethod.new
142
- elsif effective_user_method == 'sudo'
143
- SudoUserMethod.new(effective_user, ssh_user,
144
- options.fetch(:secrets, {}).fetch(:sudo_password, nil))
145
- elsif effective_user_method == 'dzdo'
146
- DzdoUserMethod.new(effective_user, ssh_user,
147
- options.fetch(:secrets, {}).fetch(:sudo_password, nil))
148
- elsif effective_user_method == 'su'
149
- SuUserMethod.new(effective_user, ssh_user)
150
- else
151
- raise "effective_user_method '#{effective_user_method}' not supported"
152
- end
153
-
154
- new(options, user_method, suspended_action: suspended_action)
155
- end
156
-
157
- def start
158
- prepare_start
159
- script = initialization_script
160
- logger.debug("executing script:\n#{indent_multiline(script)}")
161
- trigger(script)
162
- rescue => e
163
- logger.error("error while initalizing command #{e.class} #{e.message}:\n #{e.backtrace.join("\n")}")
164
- publish_exception('Error initializing command', e)
165
- end
166
-
167
- def trigger(*args)
168
- run_async(*args)
169
- end
170
-
171
- def prepare_start
172
- @remote_script = cp_script_to_remote
173
- @output_path = File.join(File.dirname(@remote_script), 'output')
174
- @exit_code_path = File.join(File.dirname(@remote_script), 'exit_code')
175
- end
176
-
177
- # the script that initiates the execution
178
- def initialization_script
179
- # pipe the output to tee while capturing the exit code in a file
180
- <<-SCRIPT.gsub(/^\s+\| /, '')
181
- | sh <<WRAPPER
182
- | (#{@user_method.cli_command_prefix}#{@remote_script} < /dev/null; echo \\$?>#{@exit_code_path}) | /usr/bin/tee #{@output_path}
183
- | exit \\$(cat #{@exit_code_path})
184
- | WRAPPER
185
- SCRIPT
186
- end
187
-
188
- def refresh
189
- return if @session.nil?
190
-
191
- with_retries do
192
- with_disconnect_handling do
193
- @session.process(0)
194
- end
195
- end
196
- ensure
197
- check_expecting_disconnect
198
- end
199
-
200
- def kill
201
- if @session
202
- run_sync("pkill -f #{remote_command_file('script')}")
203
- else
204
- logger.debug('connection closed')
205
- end
206
- rescue => e
207
- publish_exception('Unexpected error', e, false)
208
- end
209
-
210
- def timeout
211
- @logger.debug('job timed out')
212
- super
213
- end
214
-
215
- def timeout_interval
216
- execution_timeout_interval
217
- end
218
-
219
- def with_retries
220
- tries = 0
221
- begin
222
- yield
223
- rescue => e
224
- logger.error("Unexpected error: #{e.class} #{e.message}\n #{e.backtrace.join("\n")}")
225
- tries += 1
226
- if tries <= MAX_PROCESS_RETRIES
227
- logger.error('Retrying')
228
- retry
229
- else
230
- publish_exception('Unexpected error', e)
231
- end
232
- end
233
- end
234
-
235
- def with_disconnect_handling
236
- yield
237
- rescue IOError, Net::SSH::Disconnect => e
238
- @session.shutdown!
239
- check_expecting_disconnect
240
- if @expecting_disconnect
241
- publish_exit_status(0)
242
- else
243
- publish_exception('Unexpected disconnect', e)
244
- end
245
- end
246
-
247
- def close
248
- run_sync("rm -rf \"#{remote_command_dir}\"") if should_cleanup?
249
- rescue => e
250
- publish_exception('Error when removing remote working dir', e, false)
251
- ensure
252
- @session.close if @session && !@session.closed?
253
- FileUtils.rm_rf(local_command_dir) if Dir.exist?(local_command_dir) && @cleanup_working_dirs
254
- end
255
-
256
- def publish_data(data, type)
257
- super(data.force_encoding('UTF-8'), type)
258
- end
259
-
260
- private
261
-
262
- def indent_multiline(string)
263
- string.lines.map { |line| " | #{line}" }.join
264
- end
265
-
266
- def should_cleanup?
267
- @session && !@session.closed? && @cleanup_working_dirs
268
- end
269
-
270
- def session
271
- @session ||= begin
272
- @logger.debug("opening session to #{@ssh_user}@#{@host}")
273
- Net::SSH.start(@host, @ssh_user, ssh_options)
274
- end
275
- end
276
-
277
- def ssh_options
278
- ssh_options = {}
279
- ssh_options[:port] = @ssh_port if @ssh_port
280
- ssh_options[:keys] = [@client_private_key_file] if @client_private_key_file
281
- ssh_options[:password] = @ssh_password if @ssh_password
282
- ssh_options[:passphrase] = @key_passphrase if @key_passphrase
283
- ssh_options[:keys_only] = true
284
- # if the host public key is contained in the known_hosts_file,
285
- # verify it, otherwise, if missing, import it and continue
286
- ssh_options[:paranoid] = true
287
- ssh_options[:auth_methods] = available_authentication_methods
288
- ssh_options[:user_known_hosts_file] = prepare_known_hosts if @host_public_key
289
- ssh_options[:number_of_password_prompts] = 1
290
- ssh_options[:verbose] = settings[:ssh_log_level]
291
- ssh_options[:logger] = ForemanRemoteExecutionCore::LogFilter.new(SmartProxyDynflowCore::Log.instance)
292
- return ssh_options
293
- end
294
-
295
- def settings
296
- ForemanRemoteExecutionCore.settings
297
- end
298
-
299
- # Initiates run of the remote command and yields the data when
300
- # available. The yielding doesn't happen automatically, but as
301
- # part of calling the `refresh` method.
302
- def run_async(command)
303
- raise 'Async command already in progress' if @started
304
-
305
- @started = false
306
- @user_method.reset
307
-
308
- session.open_channel do |channel|
309
- channel.request_pty
310
- channel.on_data do |ch, data|
311
- publish_data(data, 'stdout') unless @user_method.filter_password?(data)
312
- @user_method.on_data(data, ch)
313
- end
314
- channel.on_extended_data { |ch, type, data| publish_data(data, 'stderr') }
315
- # standard exit of the command
316
- channel.on_request('exit-status') { |ch, data| publish_exit_status(data.read_long) }
317
- # on signal: sending the signal value (such as 'TERM')
318
- channel.on_request('exit-signal') do |ch, data|
319
- publish_exit_status(data.read_string)
320
- ch.close
321
- # wait for the channel to finish so that we know at the end
322
- # that the session is inactive
323
- ch.wait
324
- end
325
- channel.exec(command) do |_, success|
326
- @started = true
327
- raise('Error initializing command') unless success
328
- end
329
- end
330
- session.process(0) { !run_started? }
331
- return true
332
- end
333
-
334
- def run_started?
335
- @started && @user_method.sent_all_data?
336
- end
337
-
338
- def run_sync(command, stdin = nil)
339
- stdout = ''
340
- stderr = ''
341
- exit_status = nil
342
- started = false
343
-
344
- channel = session.open_channel do |ch|
345
- ch.on_data do |c, data|
346
- stdout.concat(data)
347
- end
348
- ch.on_extended_data { |_, _, data| stderr.concat(data) }
349
- ch.on_request('exit-status') { |_, data| exit_status = data.read_long }
350
- # Send data to stdin if we have some
351
- ch.send_data(stdin) unless stdin.nil?
352
- # on signal: sending the signal value (such as 'TERM')
353
- ch.on_request('exit-signal') do |_, data|
354
- exit_status = data.read_string
355
- ch.close
356
- ch.wait
357
- end
358
- ch.exec command do |_, success|
359
- raise 'could not execute command' unless success
360
-
361
- started = true
362
- end
363
- end
364
- session.process(0) { !started }
365
- # Closing the channel without sending any data gives us SIGPIPE
366
- channel.close unless stdin.nil?
367
- channel.wait
368
- return exit_status, stdout, stderr
369
- end
370
-
371
- def prepare_known_hosts
372
- path = local_command_file('known_hosts')
373
- if @host_public_key
374
- write_command_file_locally('known_hosts', "#{@host} #{@host_public_key}")
375
- end
376
- return path
377
- end
378
-
379
- def local_command_dir
380
- File.join(@local_working_dir, 'foreman-proxy', "foreman-ssh-cmd-#{@id}")
381
- end
382
-
383
- def local_command_file(filename)
384
- File.join(local_command_dir, filename)
385
- end
386
-
387
- def remote_command_dir
388
- File.join(@remote_working_dir, "foreman-ssh-cmd-#{id}")
389
- end
390
-
391
- def remote_command_file(filename)
392
- File.join(remote_command_dir, filename)
393
- end
394
-
395
- def ensure_local_directory(path)
396
- if File.exist?(path)
397
- raise "#{path} expected to be a directory" unless File.directory?(path)
398
- else
399
- FileUtils.mkdir_p(path)
400
- end
401
- return path
402
- end
403
-
404
- def cp_script_to_remote(script = @script, name = 'script')
405
- path = remote_command_file(name)
406
- @logger.debug("copying script to #{path}:\n#{indent_multiline(script)}")
407
- upload_data(sanitize_script(script), path, 555)
408
- end
409
-
410
- def upload_data(data, path, permissions = 555)
411
- ensure_remote_directory File.dirname(path)
412
- # We use tee here to pipe stdin coming from ssh to a file at $path, while silencing its output
413
- # This is used to write to $path with elevated permissions, solutions using cat and output redirection
414
- # would not work, because the redirection would happen in the non-elevated shell.
415
- command = "tee '#{path}' >/dev/null && chmod '#{permissions}' '#{path}'"
416
-
417
- @logger.debug("Sending data to #{path} on remote host:\n#{data}")
418
- status, _out, err = run_sync(command, data)
419
-
420
- @logger.warn("Output on stderr while uploading #{path}:\n#{err}") unless err.empty?
421
- if status != 0
422
- raise "Unable to upload file to #{path} on remote system: exit code: #{status}"
423
- end
424
-
425
- path
426
- end
427
-
428
- def upload_file(local_path, remote_path)
429
- mode = File.stat(local_path).mode.to_s(8)[-3..-1]
430
- @logger.debug("Uploading local file: #{local_path} as #{remote_path} with #{mode} permissions")
431
- upload_data(File.read(local_path), remote_path, mode)
432
- end
433
-
434
- def ensure_remote_directory(path)
435
- exit_code, _output, err = run_sync("mkdir -p #{path}")
436
- if exit_code != 0
437
- raise "Unable to create directory on remote system #{path}: exit code: #{exit_code}\n #{err}"
438
- end
439
- end
440
-
441
- def sanitize_script(script)
442
- script.tr("\r", '')
443
- end
444
-
445
- def write_command_file_locally(filename, content)
446
- path = local_command_file(filename)
447
- ensure_local_directory(File.dirname(path))
448
- File.write(path, content)
449
- return path
450
- end
451
-
452
- # when a remote server disconnects, it's hard to tell if it was on purpose (when calling reboot)
453
- # or it's an error. When it's expected, we expect the script to produce 'restart host' as
454
- # its last command output
455
- def check_expecting_disconnect
456
- last_output = @continuous_output.raw_outputs.find { |d| d['output_type'] == 'stdout' }
457
- return unless last_output
458
-
459
- if EXPECTED_POWER_ACTION_MESSAGES.any? { |message| last_output['output'] =~ /^#{message}/ }
460
- @expecting_disconnect = true
461
- end
462
- end
463
-
464
- def available_authentication_methods
465
- methods = %w(publickey) # Always use pubkey auth as fallback
466
- if settings[:kerberos_auth]
467
- if defined? Net::SSH::Kerberos
468
- methods << 'gssapi-with-mic'
469
- else
470
- @logger.warn('Kerberos authentication requested but not available')
471
- end
472
- end
473
- methods.unshift('password') if @ssh_password
474
-
475
- methods
476
- end
477
- end
478
- end
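
The bulk of the removal is the ScriptRunner itself, which smart_proxy_remote_execution_ssh now provides. Purely for illustration, this is roughly how the removed build factory above was driven (the option values are invented; suspended_action is normally supplied by the dispatching Dynflow action):

    runner = ForemanRemoteExecutionCore::ScriptRunner.build(
      { hostname: 'client.example.com',
        script: "#!/bin/sh\nuptime\n",
        ssh_user: 'root',
        effective_user: 'deploy',
        effective_user_method: 'sudo',
        secrets: { sudo_password: 'secret' } },
      suspended_action: nil
    )
    runner.start    # uploads the script over SSH and kicks it off
    runner.refresh  # polls the SSH session and publishes any new output
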