smart_proxy_remote_execution_ssh 0.1.6 → 0.4.0
- checksums.yaml +5 -5
- data/README.md +1 -1
- data/lib/smart_proxy_remote_execution_ssh.rb +28 -0
- data/lib/smart_proxy_remote_execution_ssh/actions/run_script.rb +20 -0
- data/lib/smart_proxy_remote_execution_ssh/api.rb +33 -0
- data/lib/smart_proxy_remote_execution_ssh/async_scripts/control.sh +110 -0
- data/lib/smart_proxy_remote_execution_ssh/async_scripts/retrieve.sh +151 -0
- data/lib/smart_proxy_remote_execution_ssh/cockpit.rb +269 -0
- data/lib/smart_proxy_remote_execution_ssh/dispatcher.rb +10 -0
- data/lib/smart_proxy_remote_execution_ssh/log_filter.rb +14 -0
- data/lib/smart_proxy_remote_execution_ssh/plugin.rb +27 -10
- data/lib/smart_proxy_remote_execution_ssh/runners.rb +7 -0
- data/lib/smart_proxy_remote_execution_ssh/runners/fake_script_runner.rb +87 -0
- data/lib/smart_proxy_remote_execution_ssh/runners/polling_script_runner.rb +140 -0
- data/lib/smart_proxy_remote_execution_ssh/runners/script_runner.rb +469 -0
- data/lib/smart_proxy_remote_execution_ssh/version.rb +1 -1
- data/lib/smart_proxy_remote_execution_ssh/webrick_ext.rb +17 -0
- data/settings.d/remote_execution_ssh.yml.example +8 -0
- metadata +61 -24
checksums.yaml CHANGED

```diff
@@ -1,7 +1,7 @@
 ---
-…
-metadata.gz: …
-data.tar.gz: …
+SHA256:
+  metadata.gz: 750910e916f0d4ad411cf868636075e597573dd8cca720e414156bcee55331dc
+  data.tar.gz: 4003e71f358abc47847fb9a2e4cf3c211189bc915b6b4196dd02d6d21633d2b8
 SHA512:
-  metadata.gz: …
-  data.tar.gz: …
+  metadata.gz: efa2a87ce6a6125f7701979305a0c5fcc1fb3ad2703d5aef140505943789b45648311e46f892791a919a88d757f019485ff45209efc22423cc6543a298224a03
+  data.tar.gz: 4411e680ca841903d47b295090c4a194f92dfe91cc58796272d42b9c370ad6a3e6a8bc34973f0d7a85d93fe604788993428d944277b05426194893ecff0a9d51
```

(The `…` placeholders mark the old checksum values, which did not survive extraction.)
data/README.md CHANGED

````diff
@@ -51,7 +51,7 @@ The simplest thing one can do is just to trigger a command:
 ```
 curl http://my-proxy.example.com:9292/dynflow/tasks \
      -X POST -H 'Content-Type: application/json'\
-     -d '{"action_name": "…
+     -d '{"action_name": "ForemanRemoteExecutionCore::Actions::RunScript",
           "action_input": {"task_id" : "1234'$RANDOM'",
                            "script": "/usr/bin/ls",
                            "hostname": "localhost",
````

(The removed line's original action name was truncated in extraction and is marked with `…`.)
data/lib/smart_proxy_remote_execution_ssh.rb CHANGED

```diff
@@ -1,5 +1,7 @@
+require 'foreman_tasks_core'
 require 'smart_proxy_remote_execution_ssh/version'
 require 'smart_proxy_dynflow'
+require 'smart_proxy_remote_execution_ssh/webrick_ext'
 require 'smart_proxy_remote_execution_ssh/plugin'
 
 module Proxy::RemoteExecution
@@ -18,6 +20,8 @@ module Proxy::RemoteExecution
       unless File.exist?(public_key_file)
         raise "Ssh public key file #{public_key_file} doesn't exist"
       end
+
+      validate_ssh_log_level!
     end
 
     def private_key_file
@@ -27,6 +31,30 @@ module Proxy::RemoteExecution
     def public_key_file
       File.expand_path("#{private_key_file}.pub")
     end
+
+    def validate_ssh_log_level!
+      wanted_level = Plugin.settings.ssh_log_level.to_s
+      levels = Plugin::SSH_LOG_LEVELS
+      unless levels.include? wanted_level
+        raise "Wrong value '#{Plugin.settings.ssh_log_level}' for ssh_log_level, must be one of #{levels.join(', ')}"
+      end
+
+      current = ::Proxy::SETTINGS.log_level.to_s.downcase
+
+      # regular log levels correspond to upcased ssh logger levels
+      ssh, regular = [wanted_level, current].map do |wanted|
+        levels.each_with_index.find { |value, _index| value == wanted }.last
+      end
+
+      if ssh < regular
+        raise 'ssh_log_level cannot be more verbose than regular log level'
+      end
+
+      Plugin.settings.ssh_log_level = Plugin.settings.ssh_log_level.to_sym
+    end
   end
+
+  require 'smart_proxy_dynflow_core/task_launcher_registry'
+  SmartProxyDynflowCore::TaskLauncherRegistry.register('ssh', ForemanTasksCore::TaskLauncher::Batch)
 end
 end
```
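The new `validate_ssh_log_level!` check constrains the `ssh_log_level` setting shipped in `settings.d/remote_execution_ssh.yml.example`. A minimal sketch of setting it, assuming the conventional smart proxy settings path (the path and chosen value here are illustrative, not part of this diff):

```sh
# Hypothetical: pick an SSH logger level no more verbose than the proxy's own
# :log_level; otherwise validate_ssh_log_level! raises at startup.
cat >> /etc/foreman-proxy/settings.d/remote_execution_ssh.yml <<'EOF'
:ssh_log_level: error
EOF
```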
data/lib/smart_proxy_remote_execution_ssh/actions/run_script.rb ADDED

```ruby
require 'foreman_tasks_core/shareable_action'

module Proxy::RemoteExecution::Ssh
  module Actions
    class RunScript < ForemanTasksCore::Runner::Action
      def initiate_runner
        additional_options = {
          :step_id => run_step_id,
          :uuid => execution_plan_id,
        }
        Proxy::RemoteExecution::Ssh::Plugin.runner_class.build(input.merge(additional_options),
                                                               suspended_action: suspended_action)
      end

      def runner_dispatcher
        Dispatcher.instance
      end
    end
  end
end
```
data/lib/smart_proxy_remote_execution_ssh/api.rb CHANGED

```diff
@@ -1,9 +1,42 @@
+require 'net/ssh'
+require 'base64'
+
 module Proxy::RemoteExecution
   module Ssh
+
     class Api < ::Sinatra::Base
+      include Sinatra::Authorization::Helpers
+
       get "/pubkey" do
         File.read(Ssh.public_key_file)
       end
+
+      post "/session" do
+        do_authorize_any
+        session = Cockpit::Session.new(env)
+        unless session.valid?
+          return [ 400, "Invalid request: /ssh/session requires connection upgrade to 'raw'" ]
+        end
+        session.hijack!
+        101
+      end
+
+      delete '/known_hosts/:name' do |name|
+        do_authorize_any
+        keys = Net::SSH::KnownHosts.search_for(name)
+        return [204] if keys.empty?
+        ssh_keys = keys.map { |key| Base64.strict_encode64 key.to_blob }
+        Net::SSH::KnownHosts.hostfiles({}, :user)
+                            .map { |file| File.expand_path file }
+                            .select { |file| File.readable?(file) && File.writable?(file) }
+                            .each do |host_file|
+          lines = File.foreach(host_file).reject do |line|
+            ssh_keys.any? { |key| line.end_with? "#{key}\n" }
+          end
+          File.open(host_file, 'w') { |f| f.write lines.join }
+        end
+        204
+      end
     end
   end
 end
```
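The new endpoints can be exercised directly; a sketch assuming the API is mounted under `/ssh` (as the error message in the `session` handler suggests) and the proxy listens on port 9292 as in the README example. Note that both handlers below the pubkey one call `do_authorize_any`, so a real deployment also has to satisfy the proxy's authorization (typically an SSL client certificate):

```sh
# Fetch the proxy's SSH public key:
curl http://my-proxy.example.com:9292/ssh/pubkey

# Forget all recorded host keys for a host, e.g. before re-provisioning it;
# the handler returns 204 whether or not any matching keys were found:
curl -X DELETE http://my-proxy.example.com:9292/ssh/known_hosts/client.example.com
```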
data/lib/smart_proxy_remote_execution_ssh/async_scripts/control.sh ADDED

```sh
#!/bin/sh
#
# Control script for the remote execution jobs.
#
# The initial script calls `$CONTROL_SCRIPT init-script-finish` once the original script exits.
# In automatic mode, the exit code is sent back to the proxy on `init-script-finish`.
#
# The script also provides a manual mode, where the author of the rex script can take
# full control of the job lifecycle. This allows keeping the job marked as running even when
# the initial script finishes.
#
# Manual mode is turned on by calling `$CONTROL_SCRIPT manual-mode`. After calling this,
# one can call `echo message | $CONTROL_SCRIPT update` to send output to the remote execution job
# and `$CONTROL_SCRIPT finish 0` once finished (with 0 as the exit code).
BASE_DIR="$(dirname "$(readlink -f "$0")")"

if ! command -v curl >/dev/null; then
    echo 'curl is required' >&2
    exit 1
fi

# send the callback data to proxy
update() {
    "$BASE_DIR/retrieve.sh" push_update
}

# wait for named pipe $1 to retrieve data. If $2 is provided, it serves as timeout
# in seconds on how long to wait when reading.
wait_for_pipe() {
    pipe_path=$1
    if [ -n "$2" ]; then
        timeout="-t $2"
    fi
    if read $timeout <>"$pipe_path"; then
        rm "$pipe_path"
        return 0
    else
        return 1
    fi
}

# function run in the background, sending periodic updates while update data is received via STDIN
periodic_update() {
    interval=1
    # reading some data from periodic_update_control signals we're done
    while ! wait_for_pipe "$BASE_DIR/periodic_update_control" "$interval"; do
        update
    done
    # one more update before we finish
    update
    # signal the main process that we are finished
    echo > "$BASE_DIR/periodic_update_finished"
}

# signal the periodic_update process that the main process is finishing
periodic_update_finish() {
    if [ -e "$BASE_DIR/periodic_update_control" ]; then
        echo > "$BASE_DIR/periodic_update_control"
    fi
}

ACTION=${1:-finish}

case "$ACTION" in
    init-script-finish)
        if ! [ -e "$BASE_DIR/manual_mode" ]; then
            # make the exit code of the initialization script the exit code of the whole job
            cp "$BASE_DIR/init_exit_code" "$BASE_DIR/exit_code"
            update
        fi
        ;;
    finish)
        # take the exit code passed via the command line, with fallback
        # to the exit code of the initialization script
        exit_code=${2:-$(cat "$BASE_DIR/init_exit_code")}
        echo $exit_code > "$BASE_DIR/exit_code"
        update
        if [ -e "$BASE_DIR/manual_mode" ]; then
            rm "$BASE_DIR/manual_mode"
        fi
        ;;
    update)
        # read data from input when redirected through a pipe
        if ! [ -t 0 ]; then
            # couple of named pipes to coordinate the main process with the periodic_update
            mkfifo "$BASE_DIR/periodic_update_control"
            mkfifo "$BASE_DIR/periodic_update_finished"
            trap "periodic_update_finish" EXIT
            # run periodic update as a separate process to keep sending output updates to the server
            periodic_update &
            # redirect the input into output
            tee -a "$BASE_DIR/output"
            periodic_update_finish
            # ensure the periodic update finished before we return
            wait_for_pipe "$BASE_DIR/periodic_update_finished"
        else
            update
        fi
        ;;
    # mark the script to be in manual mode: this means the script author needs to use the `update` and `finish`
    # commands to send output to the remote execution job or mark it as finished.
    manual-mode)
        touch "$BASE_DIR/manual_mode"
        ;;
    *)
        echo "Unknown action $ACTION"
        exit 1
        ;;
esac
```
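To illustrate the manual-mode lifecycle the header comment describes, here is a sketch of a job script; that `$CONTROL_SCRIPT` points at this control script is an assumption about the job environment, and `long_running_task` is a hypothetical stand-in:

```sh
#!/bin/sh
# Sketch of a rex script driving the job lifecycle manually.
$CONTROL_SCRIPT manual-mode                            # keep the job marked as running after this script exits
echo "provisioning started" | $CONTROL_SCRIPT update   # stream a line of output to the job
long_running_task &                                    # hypothetical background work
wait $!
$CONTROL_SCRIPT finish $?                              # report the real exit code and finish the job
```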
data/lib/smart_proxy_remote_execution_ssh/async_scripts/retrieve.sh ADDED

```sh
#!/bin/sh

if ! pgrep --help 2>/dev/null >/dev/null; then
    echo DONE 1
    echo "pgrep is required" >&2
    exit 1
fi

BASE_DIR="$(dirname "$(readlink -f "$0")")"

# load the data required for generating the callback
. "$BASE_DIR/env.sh"
URL_PREFIX="$CALLBACK_HOST/dynflow/tasks/$TASK_ID"
AUTH="$TASK_ID:$OTP"
CURL="curl --silent --show-error --fail --max-time 10"

MY_LOCK_FILE="$BASE_DIR/retrieve_lock.$$"
MY_PID=$$
echo $MY_PID >"$MY_LOCK_FILE"
LOCK_FILE="$BASE_DIR/retrieve_lock"
TMP_OUTPUT_FILE="$BASE_DIR/tmp_output"

RUN_TIMEOUT=30  # for how long the script can hold the lock
WAIT_TIMEOUT=60 # for how long the script keeps trying to acquire the lock
START_TIME=$(date +%s)

fail() {
    echo RUNNING
    echo "$1"
    exit 1
}

acquire_lock() {
    # try to acquire the lock by creating the file (ln should be atomic and fail in case
    # another process succeeded first). We also check the content of the lock file,
    # in case our process won when competing over the lock while invalidating
    # the lock on timeout.
    ln "$MY_LOCK_FILE" "$LOCK_FILE" 2>/dev/null || [ "$(head -n1 "$LOCK_FILE")" = "$MY_PID" ]
    return $?
}

# acquire the lock before proceeding, to ensure only one instance of the script is running
while ! acquire_lock; do
    # we failed to create retrieve_lock - assuming there is already another retrieve script running
    current_pid=$(head -n1 "$LOCK_FILE")
    if [ -z "$current_pid" ]; then
        continue
    fi
    # check whether the lock is too old (compared to $RUN_TIMEOUT) and try to kill
    # its owner if it is, so that we don't have stalled processes here
    lock_lines_count=$(wc -l < "$LOCK_FILE")
    current_lock_time=$(stat --format "%Y" "$LOCK_FILE")
    current_time=$(date +%s)

    if [ "$(( current_time - START_TIME ))" -gt "$WAIT_TIMEOUT" ]; then
        # we were waiting for the lock for too long - just give up
        fail "Wait time exceeded $WAIT_TIMEOUT"
    elif [ "$(( current_time - current_lock_time ))" -gt "$RUN_TIMEOUT" ]; then
        # the previous lock has been held for too long - start the re-acquiring procedure
        if [ "$lock_lines_count" -gt 1 ]; then
            # there were multiple processes waiting for the lock without resolution
            # for longer than $RUN_TIMEOUT - we reset the lock file and let the
            # processes compete
            echo "RETRY" > "$LOCK_FILE"
        fi
        if [ "$current_pid" != "RETRY" ]; then
            # try to kill the currently stalled process
            kill -9 "$current_pid" 2>/dev/null
        fi
        # try to add our process as one candidate
        echo $MY_PID >> "$LOCK_FILE"
        if [ "$( head -n2 "$LOCK_FILE" | tail -n1 )" = "$MY_PID" ]; then
            # our process won the competition for the new lock: it is the first pid
            # after the original one in the lock file - take ownership of the lock;
            # next iteration only this process will get through
            echo $MY_PID >"$LOCK_FILE"
        fi
    else
        # still waiting for the original owner to finish
        sleep 1
    fi
done

release_lock() {
    rm "$MY_LOCK_FILE"
    rm "$LOCK_FILE"
}
# ensure we release the lock at exit
trap "release_lock" EXIT

# make sure we clear the previous tmp output file
if [ -e "$TMP_OUTPUT_FILE" ]; then
    rm "$TMP_OUTPUT_FILE"
fi

pid=$(cat "$BASE_DIR/pid")
[ -f "$BASE_DIR/position" ] || echo 1 > "$BASE_DIR/position"
position=$(cat "$BASE_DIR/position")

prepare_output() {
    if [ -e "$BASE_DIR/manual_mode" ] || ([ -n "$pid" ] && pgrep -P "$pid" >/dev/null 2>&1); then
        echo RUNNING
    else
        echo "DONE $(cat "$BASE_DIR/exit_code" 2>/dev/null)"
    fi
    [ -f "$BASE_DIR/output" ] || exit 0
    tail --bytes "+${position}" "$BASE_DIR/output" > "$TMP_OUTPUT_FILE"
    cat "$TMP_OUTPUT_FILE"
}

# prepare the callback payload
payload() {
    if [ -n "$1" ]; then
        exit_code="$1"
    else
        exit_code=null
    fi

    if [ -e "$BASE_DIR/manual_mode" ]; then
        manual_mode=true
        output=$(prepare_output | base64 -w0)
    else
        manual_mode=false
    fi

    echo "{ \"exit_code\": $exit_code,"\
         " \"step_id\": \"$STEP_ID\","\
         " \"manual_mode\": $manual_mode,"\
         " \"output\": \"$output\" }"
}

if [ "$1" = "push_update" ]; then
    if [ -e "$BASE_DIR/exit_code" ]; then
        exit_code="$(cat "$BASE_DIR/exit_code")"
        action="done"
    else
        exit_code=""
        action="update"
    fi
    $CURL -X POST -d "$(payload $exit_code)" -u "$AUTH" "$URL_PREFIX"/$action 2>>"$BASE_DIR/curl_stderr"
    success=$?
else
    prepare_output
    success=$?
fi

if [ "$success" = 0 ] && [ -e "$TMP_OUTPUT_FILE" ]; then
    # if the retrieval was successful, move the position of the cursor to be read next time
    bytes=$(wc --bytes < "$TMP_OUTPUT_FILE")
    expr "${position}" + "${bytes}" > "$BASE_DIR/position"
fi
```
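For clarity, this is the shape of the callback request `push_update` ends up issuing; the values are illustrative, and in manual mode `output` would carry the base64-encoded output delta produced by `prepare_output`:

```sh
# Equivalent of `retrieve.sh push_update` while the job is still running
# (illustrative values come from env.sh: $TASK_ID, $OTP, $STEP_ID, $CALLBACK_HOST):
curl --silent --show-error --fail --max-time 10 -X POST \
  -u "$TASK_ID:$OTP" \
  -d '{ "exit_code": null, "step_id": "1", "manual_mode": false, "output": "" }' \
  "$CALLBACK_HOST/dynflow/tasks/$TASK_ID/update"
```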
data/lib/smart_proxy_remote_execution_ssh/cockpit.rb ADDED

```ruby
require 'net/ssh'
require 'forwardable'

module Proxy::RemoteExecution
  module Cockpit
    # A wrapper class around different kinds of sockets to comply with the Net::SSH event loop
    class BufferedSocket
      include Net::SSH::BufferedIo
      extend Forwardable

      # The list of methods taken from OpenSSL::SSL::SocketForwarder for the object to act like a socket
      def_delegators(:@socket, :to_io, :addr, :peeraddr, :setsockopt,
                     :getsockopt, :fcntl, :close, :closed?, :do_not_reverse_lookup=)

      def initialize(socket)
        @socket = socket
        initialize_buffered_io
      end

      def recv
        raise NotImplementedError
      end

      def send
        raise NotImplementedError
      end

      def self.applies_for?(socket)
        raise NotImplementedError
      end

      def self.build(socket)
        klass = [OpenSSLBufferedSocket, MiniSSLBufferedSocket, StandardBufferedSocket].find do |potential_class|
          potential_class.applies_for?(socket)
        end
        raise "No suitable implementation of buffered socket available for #{socket.inspect}" unless klass
        klass.new(socket)
      end
    end

    class StandardBufferedSocket < BufferedSocket
      def_delegators(:@socket, :send, :recv)

      def self.applies_for?(socket)
        socket.respond_to?(:send) && socket.respond_to?(:recv)
      end
    end

    class OpenSSLBufferedSocket < BufferedSocket
      def self.applies_for?(socket)
        socket.is_a? ::OpenSSL::SSL::SSLSocket
      end
      def_delegators(:@socket, :read_nonblock, :write_nonblock, :close)

      def recv(n)
        res = ""
        begin
          # To drain a SSLSocket before we can go back to the event
          # loop, we need to repeatedly call read_nonblock; a single
          # call is not enough.
          loop do
            res += @socket.read_nonblock(n)
          end
        rescue IO::WaitReadable
          # Sometimes there is no payload after reading everything
          # from the underlying socket, but an empty string is treated
          # as EOF by Net::SSH. So we block a bit until we have
          # something to return.
          if res == ""
            IO.select([@socket.to_io])
            retry
          else
            res
          end
        rescue IO::WaitWritable
          # A renegotiation is happening, let it proceed.
          IO.select(nil, [@socket.to_io])
          retry
        end
      end

      def send(mesg, flags)
        @socket.write_nonblock(mesg)
      rescue IO::WaitWritable
        0
      rescue IO::WaitReadable
        IO.select([@socket.to_io])
        retry
      end
    end

    class MiniSSLBufferedSocket < BufferedSocket
      def self.applies_for?(socket)
        socket.is_a? ::Puma::MiniSSL::Socket
      end
      def_delegators(:@socket, :read_nonblock, :write_nonblock, :close)

      def recv(n)
        @socket.read_nonblock(n)
      end

      def send(mesg, flags)
        @socket.write_nonblock(mesg)
      end

      def closed?
        @socket.to_io.closed?
      end
    end

    class Session
      include ::Proxy::Log

      def initialize(env)
        @env = env
      end

      def valid?
        @env["HTTP_CONNECTION"] == "upgrade" && @env["HTTP_UPGRADE"].to_s.split(',').any? { |part| part.strip == "raw" }
      end

      def hijack!
        @socket = nil
        if @env['ext.hijack!']
          @socket = @env['ext.hijack!'].call
        elsif @env['rack.hijack?']
          begin
            @env['rack.hijack'].call
          rescue NotImplementedError
          end
          @socket = @env['rack.hijack_io']
        end
        raise 'Internal error: request hijacking not available' unless @socket
        ssh_on_socket
      end

      private

      def ssh_on_socket
        with_error_handling { start_ssh_loop }
      end

      def with_error_handling
        yield
      rescue Net::SSH::AuthenticationFailed => e
        send_error(401, e.message)
      rescue Errno::EHOSTUNREACH
        send_error(400, "No route to #{host}")
      rescue SystemCallError => e
        send_error(400, e.message)
      rescue SocketError => e
        send_error(400, e.message)
      rescue Exception => e
        logger.error e.message
        logger.debug e.backtrace.join("\n")
        send_error(500, "Internal error") unless @started
      ensure
        unless buf_socket.closed?
          buf_socket.wait_for_pending_sends
          buf_socket.close
        end
      end

      def start_ssh_loop
        err_buf = ""

        Net::SSH.start(host, ssh_user, ssh_options) do |ssh|
          channel = ssh.open_channel do |ch|
            ch.exec(command) do |ch, success|
              raise "could not execute command" unless success

              ssh.listen_to(buf_socket)

              ch.on_process do
                if buf_socket.available.positive?
                  ch.send_data(buf_socket.read_available)
                end
                if buf_socket.closed?
                  ch.close
                end
              end

              ch.on_data do |ch2, data|
                send_start
                buf_socket.enqueue(data)
              end

              ch.on_request('exit-status') do |ch, data|
                code = data.read_long
                send_start if code.zero?
                err_buf += "Process exited with code #{code}.\r\n"
                ch.close
              end

              ch.on_request('exit-signal') do |ch, data|
                err_buf += "Process was terminated with signal #{data.read_string}.\r\n"
                ch.close
              end

              ch.on_extended_data do |ch2, type, data|
                err_buf += data
              end
            end
          end

          channel.wait
          send_error(400, err_buf) unless @started
        end
      end

      def send_start
        unless @started
          @started = true
          buf_socket.enqueue("Status: 101\r\n")
          buf_socket.enqueue("Connection: upgrade\r\n")
          buf_socket.enqueue("Upgrade: raw\r\n")
          buf_socket.enqueue("\r\n")
        end
      end

      def send_error(code, msg)
        buf_socket.enqueue("Status: #{code}\r\n")
        buf_socket.enqueue("Connection: close\r\n")
        buf_socket.enqueue("\r\n")
        buf_socket.enqueue(msg)
      end

      def params
        @params ||= MultiJson.load(@env["rack.input"].read)
      end

      def key_file
        @key_file ||= Proxy::RemoteExecution::Ssh.private_key_file
      end

      def buf_socket
        @buffered_socket ||= BufferedSocket.build(@socket)
      end

      def command
        params["command"]
      end

      def ssh_user
        params["ssh_user"]
      end

      def host
        params["hostname"]
      end

      def ssh_options
        auth_methods = %w[publickey]
        auth_methods.unshift('password') if params["ssh_password"]

        ret = {}
        ret[:port] = params["ssh_port"] if params["ssh_port"]
        ret[:keys] = [key_file] if key_file
        ret[:password] = params["ssh_password"] if params["ssh_password"]
        ret[:passphrase] = params[:ssh_key_passphrase] if params[:ssh_key_passphrase]
        ret[:keys_only] = true
        ret[:auth_methods] = auth_methods
        ret[:verify_host_key] = true
        ret[:number_of_password_prompts] = 1
        ret
      end
    end
  end
end
```
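A sketch of the upgrade request `Session#valid?` accepts and `hijack!` then services. Plain curl cannot keep speaking the raw protocol after the `Status: 101` response, so this only illustrates the headers and the JSON fields (`command`, `hostname`, `ssh_user`, plus optional `ssh_port` and `ssh_password`) that `Session` reads; the mount point `/ssh` is assumed from the session handler's error message:

```sh
curl -i http://my-proxy.example.com:9292/ssh/session \
  -H 'Connection: upgrade' \
  -H 'Upgrade: raw' \
  -d '{"command": "cockpit-bridge", "hostname": "client.example.com", "ssh_user": "root"}'
```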