swarm_cluster_cli_ope 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +10 -0
- data/.ruby-gemset +1 -0
- data/.ruby-version +1 -0
- data/CHANGELOG.md +9 -0
- data/Gemfile +6 -0
- data/Gemfile.lock +39 -0
- data/LICENSE.txt +21 -0
- data/README.md +121 -0
- data/Rakefile +2 -0
- data/exe/swarm_cli_ope +4 -0
- data/lib/swarm_cluster_cli_ope.rb +8 -0
- data/lib/swarm_cluster_cli_ope/cli.rb +251 -0
- data/lib/swarm_cluster_cli_ope/commands/base.rb +70 -0
- data/lib/swarm_cluster_cli_ope/commands/container.rb +24 -0
- data/lib/swarm_cluster_cli_ope/commands/service.rb +42 -0
- data/lib/swarm_cluster_cli_ope/commands/swarm.rb +14 -0
- data/lib/swarm_cluster_cli_ope/commands/task.rb +11 -0
- data/lib/swarm_cluster_cli_ope/configuration.rb +189 -0
- data/lib/swarm_cluster_cli_ope/configuration_concern.rb +24 -0
- data/lib/swarm_cluster_cli_ope/logger_concern.rb +26 -0
- data/lib/swarm_cluster_cli_ope/manager.rb +9 -0
- data/lib/swarm_cluster_cli_ope/models/base.rb +42 -0
- data/lib/swarm_cluster_cli_ope/models/container.rb +78 -0
- data/lib/swarm_cluster_cli_ope/models/mapped_volume.rb +43 -0
- data/lib/swarm_cluster_cli_ope/models/service.rb +38 -0
- data/lib/swarm_cluster_cli_ope/models/stack.rb +18 -0
- data/lib/swarm_cluster_cli_ope/models/task.rb +27 -0
- data/lib/swarm_cluster_cli_ope/node.rb +80 -0
- data/lib/swarm_cluster_cli_ope/shell_command_execution.rb +68 -0
- data/lib/swarm_cluster_cli_ope/shell_command_response.rb +68 -0
- data/lib/swarm_cluster_cli_ope/version.rb +3 -0
- data/lib/swarm_cluster_cli_ope/worker.rb +7 -0
- data/swarm_cluster_cli_ope.gemspec +36 -0
- metadata +138 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
|
|
1
|
+
---
|
2
|
+
SHA256:
|
3
|
+
metadata.gz: '09565df25f695d7e64c5034d7daaa833ba640c61b2b326ad046601355f13f705'
|
4
|
+
data.tar.gz: c997f3969018ae3335e86987595b7c5e1bba6792477c6c343f27e291a1430f47
|
5
|
+
SHA512:
|
6
|
+
metadata.gz: b1f2e55552084f16fd1b0f2bc9955113771212b52d5f9d508aac86a578e0bf1d95f2a513fa9b171e31cb7016d9899002331957d38fdc1e71afa0c1a677f95b59
|
7
|
+
data.tar.gz: a0daa2f6d6b4187d038eea485d65c88d5aa670760f99935d8115e4c8cccf6196f991b7e8f71a17881a44117f40a7d60cdde94a7d91707dc7eb154fb9ad70d4c1
|
data/.gitignore
ADDED
data/.ruby-gemset
ADDED
@@ -0,0 +1 @@
|
|
1
|
+
swarm_cluster_cli
|
data/.ruby-version
ADDED
@@ -0,0 +1 @@
|
|
1
|
+
ruby-2.5.3
|
data/CHANGELOG.md
ADDED
data/Gemfile
ADDED
data/Gemfile.lock
ADDED
@@ -0,0 +1,39 @@
|
|
1
|
+
PATH
|
2
|
+
remote: .
|
3
|
+
specs:
|
4
|
+
swarm_cluster_cli_ope (0.1.2)
|
5
|
+
activesupport
|
6
|
+
open4
|
7
|
+
thor (~> 1.0)
|
8
|
+
zeitwerk (~> 2.3)
|
9
|
+
|
10
|
+
GEM
|
11
|
+
remote: https://rubygems.org/
|
12
|
+
specs:
|
13
|
+
activesupport (6.0.2.2)
|
14
|
+
concurrent-ruby (~> 1.0, >= 1.0.2)
|
15
|
+
i18n (>= 0.7, < 2)
|
16
|
+
minitest (~> 5.1)
|
17
|
+
tzinfo (~> 1.1)
|
18
|
+
zeitwerk (~> 2.2)
|
19
|
+
concurrent-ruby (1.1.6)
|
20
|
+
i18n (1.8.2)
|
21
|
+
concurrent-ruby (~> 1.0)
|
22
|
+
minitest (5.14.0)
|
23
|
+
open4 (1.3.4)
|
24
|
+
rake (12.3.3)
|
25
|
+
thor (1.0.1)
|
26
|
+
thread_safe (0.3.6)
|
27
|
+
tzinfo (1.2.7)
|
28
|
+
thread_safe (~> 0.1)
|
29
|
+
zeitwerk (2.3.0)
|
30
|
+
|
31
|
+
PLATFORMS
|
32
|
+
ruby
|
33
|
+
|
34
|
+
DEPENDENCIES
|
35
|
+
rake (~> 12.0)
|
36
|
+
swarm_cluster_cli_ope!
|
37
|
+
|
38
|
+
BUNDLED WITH
|
39
|
+
1.17.3
|
data/LICENSE.txt
ADDED
@@ -0,0 +1,21 @@
|
|
1
|
+
The MIT License (MIT)
|
2
|
+
|
3
|
+
Copyright (c) 2020 Marino Bonetti
|
4
|
+
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
7
|
+
in the Software without restriction, including without limitation the rights
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
10
|
+
furnished to do so, subject to the following conditions:
|
11
|
+
|
12
|
+
The above copyright notice and this permission notice shall be included in
|
13
|
+
all copies or substantial portions of the Software.
|
14
|
+
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
21
|
+
THE SOFTWARE.
|
data/README.md
ADDED
@@ -0,0 +1,121 @@
|
|
1
|
+
# SwarmClusterCliOpe
|
2
|
+
WIP to translate
|
3
|
+
Gemma per la gestione semplificata degli operatori con un cluster swarm
|
4
|
+
|
5
|
+
## Installation
|
6
|
+
|
7
|
+
Add this line to your application's Gemfile:
|
8
|
+
|
9
|
+
```ruby
|
10
|
+
gem 'swarm_cluster_cli_ope'
|
11
|
+
```
|
12
|
+
|
13
|
+
And then execute:
|
14
|
+
|
15
|
+
$ bundle install
|
16
|
+
|
17
|
+
Or install it yourself as:
|
18
|
+
|
19
|
+
$ gem install swarm_cluster_cli_ope
|
20
|
+
|
21
|
+
## Usage
|
22
|
+
|
23
|
+
Una volta installato lanciare il comando
|
24
|
+
|
25
|
+
```swarm_cluster_cli_ope install``` che si occuperà di configurare le varie impostazioni dell'ambiente
|
26
|
+
|
27
|
+
FILE di configurazione base:
|
28
|
+
```json
|
29
|
+
{"version":"0.1.0","dev_mode":1,"log_level": "3","connections_maps":{"swm1": "swarm_node_1","swm2": "swarm_node_2","swm3": "swarm_node_3"}}
|
30
|
+
```
|
31
|
+
|
32
|
+
### LogLevel:
|
33
|
+
0 ERROR
|
34
|
+
1 WARN
|
35
|
+
2 INFO
|
36
|
+
3 DEBUG
|
37
|
+
|
38
|
+
|
39
|
+
### Configurazione di un progetto
|
40
|
+
Si occupa di generare nel progetto il file di configurazione in cui impostare impostazioni specifiche di progetto
|
41
|
+
quali stack_name
|
42
|
+
```shell script
|
43
|
+
swarm_cli_ope configure_project STACK_NAME
|
44
|
+
```
|
45
|
+
|
46
|
+
### Configurazioni applicate nel progetto:
|
47
|
+
```shell script
|
48
|
+
swarm_cli_ope config
|
49
|
+
```
|
50
|
+
|
51
|
+
### Elenco di tutti gli stack disponibili:
|
52
|
+
```shell script
|
53
|
+
swarm_cli_ope stacks
|
54
|
+
```
|
55
|
+
|
56
|
+
|
57
|
+
### MC:
|
58
|
+
Apre MC collegato al container del servizio specificato
|
59
|
+
```shell script
|
60
|
+
swarm_cli_ope mc SERVICE_NAME --stack-name=NOME_STACK
|
61
|
+
```
|
62
|
+
|
63
|
+
### SHELL:
|
64
|
+
Apre una shell (default bash) collegato al container del servizio specificato
|
65
|
+
```shell script
|
66
|
+
swarm_cli_ope shell SERVICE_NAME --stack-name=NOME_STACK
|
67
|
+
```
|
68
|
+
|
69
|
+
### Elenco di tutti i servizi
|
70
|
+
Se siamo nel progetto con il file di progetto vedremo comunque i servizi filtrati
|
71
|
+
```shell script
|
72
|
+
swarm_cli_ope services
|
73
|
+
```
|
74
|
+
|
75
|
+
filtrando per stack:
|
76
|
+
|
77
|
+
```shell script
|
78
|
+
swarm_cli_ope services --stack-name=NOME_STACK
|
79
|
+
```
|
80
|
+
|
81
|
+
### Copia di files da/verso container attraverso il docker cp
|
82
|
+
```shell script
|
83
|
+
swarm_cli_ope cp --stack-name=NOME_STACK PATH_FILE_LOCALE NOME_SERVIZIO:DESTINAZIONE_NEL_CONTAINER
|
84
|
+
```
|
85
|
+
ES:
|
86
|
+
```shell script
|
87
|
+
swarm_cli_ope cp --stack-name=webapps-examinerapp-staging ./test_folder/test_1/cartella_bindata/test jeapp:/tmp/.
|
88
|
+
```
|
89
|
+
|
90
|
+
### Rsync da/a container a/da locale
|
91
|
+
|
92
|
+
Utilizzare `rsync_binded_from` per scaricare e `rsync_binded_to` per caricare
|
93
|
+
|
94
|
+
|
95
|
+
```shell script
|
96
|
+
swarm_cli_ope rsync_binded_from --stack-name=STACK_NAME --service_name NOME_SERVIZIO_SENZA_STACK --binded-container-folders CARTELLA_CONTAINER --local-folder CARTELLA_DESTINAZIONE
|
97
|
+
```
|
98
|
+
|
99
|
+
ES:
|
100
|
+
```shell script
|
101
|
+
swarm_cli_ope rsync_binded_from --stack-name=web-site-ranchilbosco-production --service_name wordpress --binded-container-folders /var/www/html/wp-content/uploads --destination ./uploads
|
102
|
+
```
|
103
|
+
|
104
|
+
|
105
|
+
## Development
|
106
|
+
|
107
|
+
nel file di configurazione creato nella home aggiungere la chiave "dev_mode":1 per collegarsi localmente
|
108
|
+
|
109
|
+
### Abbiamo due tasks swarm di simulazione
|
110
|
+
```shell script
|
111
|
+
docker stack deploy -c test_folder/test_1/docker_compose.yml test1
|
112
|
+
docker stack deploy -c test_folder/test_2/docker_compose.yml test2
|
113
|
+
```
|
114
|
+
|
115
|
+
To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org](https://rubygems.org).
|
116
|
+
|
117
|
+
|
118
|
+
## License
|
119
|
+
|
120
|
+
The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
|
121
|
+
|
data/Rakefile
ADDED
data/exe/swarm_cli_ope
ADDED
@@ -0,0 +1,251 @@
|
|
1
|
+
require 'thor'
|
2
|
+
require 'mkmf'
|
3
|
+
# CLI entry point for the swarm_cluster_cli_ope gem: a Thor-based command-line
# tool for operating a Docker Swarm cluster — listing stacks/services, opening
# MC or a shell inside a service container, copying files with `docker cp`,
# and rsync-ing bind-mounted folders to/from the cluster nodes.
#
# NOTE(review): relies on project classes not visible in this file
# (Configuration, Node, Models::*, ShellCommandExecution) and on the
# LoggerConcern/ConfigurationConcern mixins (which provide `cfgs`).
module SwarmClusterCliOpe

  class Cli < Thor
    include LoggerConcern
    include ConfigurationConcern
    include Thor::Actions

    # Tell Thor to exit with a non-zero status when a command fails.
    def self.exit_on_failure?
      true
    end

    desc "install", "Creazione della configurazione base della gemma"

    # Interactively builds the base (HOME) configuration: prompts for manager
    # node connection URIs in a loop and persists them through `cfgs`.
    def install
      # check whether the base configuration is already present in the home
      if Configuration.exist_base?
        say "Configurazione già presente"
      else
        # not present yet: interactively collect the manager-node connections
        lista = []
        loop do
          connection_name = ask("Aggiungi un server alla lista dei server Manager(inserire uri: ssh://server | unix:///socket/path:")
          result = Node.info(connection_name)
          node = Node.new(name: result.Name, connection_uri: connection_name)
          say "Aggiungo #{node.name} che si connette con DOCKER_HOST=#{node.connection_uri}"
          lista << node
          break if no? "Vuoi inserire al server?[n,no]"
        end
        # persist the collected node list as the base configuration
        cfg = cfgs
        cfg.nodes = lista
        cfg.save_base_cfgs
      end

    end

    desc "config", "Visualizza le configurazioni mergiate (HOME + Project)"

    # Prints the merged configuration (HOME + project) as pretty JSON.
    def config
      puts JSON.pretty_generate(cfgs.class.merged_configurations)
    end


    # DOCKER_HOST=ssh://swarm_node_1 docker stack ls --format="{{json .}}"
    desc "stacks", "Lista degli stacks nel cluster"

    # Lists the names of all stacks deployed on the cluster.
    def stacks
      Models::Stack.all.each do |s|
        puts s.name
      end
    end

    desc "services", "lista dei servizi per uno stack"
    option :stack_name, required: false, type: :string, default: cfgs.stack_name

    # Lists the services, optionally filtered by --stack-name.
    def services
      Models::Service.all(stack_name: options[:stack_name]).each do |s|
        puts s.name
      end
    end

    desc "mc SERVICE_NAME", "Apre MC tra la cartella attuale e il container (potrebbe dar luogo a degli errori, ma funziona)"
    option :stack_name, required: false, type: :string, default: cfgs.stack_name

    # Opens Midnight Commander against the container of the given service:
    # starts a throwaway sshd sidecar container sharing the target's volumes,
    # opens an SSH tunnel to it, runs `mc` over sftp, then tears everything
    # down in the ensure block (tunnel and sidecar container).
    def mc(service_name)

      # silence the mkmf library's logging output
      MakeMakefile::Logging.instance_variable_set(:@logfile, File::NULL)
      unless find_executable 'mc'
        puts "Non hai installato MC"
        exit 0
      end

      begin
        container = Models::Container.find_by_service_name(service_name, stack_name: options[:stack_name])

        server = container.node.hostname

        # Create the ssh sidecar container, e.g.:
        # DOCKER_HOST=ssh://swarm_node_1 docker run --rm -d -p 12222:22 \
        #   --volumes-from sistemi-test_swarm_cluster_cli_wordpress.1.zbbz1xxh4vzzccndvs973jnuc \
        #   sickp/alpine-sshd:7.5
        #
        cmd = container.docker_command
        cmd.base_suffix_command = ''
        shell_operation = cmd.command do |c|
          c.add("run --rm -d -p 42222:22 --volumes-from #{container.id} sickp/alpine-sshd:7.5")
        end

        puts "Creazione container #{shell_operation.string_command}"
        id_container = shell_operation.execute.raw_result[:stdout]
        puts "Container generato con id:#{id_container}"

        # open an SSH tunnel to the node and the ssh sidecar container
        socket_ssh_path = "/tmp/socket_ssh_#{id_container}"
        # ssh -f -N -T -M -S <path-to-socket> -L 13333:0.0.0.0:42222 <server>
        cmd_tunnel = ["ssh", "-f -N -T -M", "-S #{socket_ssh_path}", "-L 13333:0.0.0.0:42222", server].join(" ")
        puts "Apro tunnel"
        puts cmd_tunnel
        system(cmd_tunnel)

        # open MC over sftp through the tunnel
        # mc . sftp://root:root@0.0.0.0:13333
        mc_cmd = "mc . sftp://root:root@0.0.0.0:13333"
        puts "Apro MC"
        puts mc_cmd
        system(mc_cmd)
      ensure
        if socket_ssh_path
          # close the tunnel via its control socket:
          # ssh -S <path-to-socket> -O exit <server>
          close_tunnel_cmd = "ssh -S #{socket_ssh_path} -O exit #{server}"
          puts "Chiudo tunnel"
          # say close_tunnel_cmd
          ShellCommandExecution.new(close_tunnel_cmd).execute
        end

        if id_container
          # remove the sidecar container:
          # docker stop #{id_container}
          puts "Spengo container di appoggio"
          puts "docker stop #{id_container}"
          cmd = container.docker_command
          cmd.base_suffix_command = ''
          stop_ssh_container = cmd.command do |c|
            c.add("stop #{id_container}")
          end
          stop_ssh_container.execute
        end

      end
    end

    desc "cp SRC DEST", "Copia la sorgente in destinazione"
    option :stack_name, required: false, type: :string, default: cfgs.stack_name
    long_desc <<-LONGDESC
      SRC e DEST possono essere un servizio, solo uno di essi può essere un servizio (TODO)
      Per identificare che sia un servizio controllo se nella stringa è presete il :
      il quale mi identifica l'inizio della PATH assoluta all'interno del primo container del servizio
      dove copiare i files
    LONGDESC

    # Copies files between the local filesystem and a service container via
    # `docker cp`. The side containing a ':' is treated as SERVICE:PATH;
    # the other side is the local path.
    def cp(src, dest)
      # determine which argument is the service and which is the local path
      if src.match(/^(.*)\:/)
        container = Models::Container.find_by_service_name(Regexp.last_match[1], stack_name: options[:stack_name])
        ris = container.copy_out(src.match(/\:(.*)$/)[1], dest)
      else
        container = Models::Container.find_by_service_name(dest.match(/^(.*)\:/)[1], stack_name: options[:stack_name])
        ris = container.copy_in(src, dest.match(/\:(.*)$/)[1])
      end
      puts "COMPLETATO" if ris
    end


    desc "configure_project STACK_NAME", "Genera il file di configurazione del progetto contenente il nome dello stack"

    # Writes the project-level configuration file holding the stack name.
    def configure_project(stack_name)
      cfgs.stack_name = stack_name
      cfgs.save_project_cfgs
    end

    desc "service_shell SERVICE_NAME", "apre una shell [default bash] dentro al container"
    option :stack_name, required: false, type: :string, default: cfgs.stack_name
    option :shell, required: false, type: :string, default: 'bash'

    # Opens an interactive shell (default bash, override with --shell) inside
    # the container of the given service via `docker exec -it`.
    def service_shell(service_name)
      container = Models::Container.find_by_service_name(service_name, stack_name: options[:stack_name])

      cmd = container.docker_command
      cmd.base_suffix_command = ''
      shell_operation = cmd.command do |c|
        c.add("exec -it #{container.id} #{options[:shell]}")
      end

      say "Stai entrando della shell in #{options[:shell]} del container #{options[:stack_name]}->#{container.name}[#{container.id}]"
      system(shell_operation.string_command)
      say "Shell chiusa"
    end


    desc "rsync_binded_from", "esegue un rsync dalla cartella bindata (viene sincronizzato il contenuto)"
    option :stack_name, required: false, type: :string, default: cfgs.stack_name
    option :service_name, required: true, type: :string
    option :binded_container_folders, required: true, type: :string, desc: "path della cartella bindata all'interno del container da sincronizzare"
    option :local_folder, required: false, type: :string, desc: "path della cartella dove sincronizzare il comando"

    # Rsyncs the contents of a bind-mounted container folder DOWN to a local
    # folder, after an explicit confirmation (local data is overwritten).
    def rsync_binded_from
      if yes? "Attenzione, i dati locali verranno sovrascritti/cancellati?[y,yes]"
        rsync_binded(direction: :down, options: options)
      end
    end

    desc "rsync_binded_to", "esegue un rsync verso la cartella bindata"
    option :stack_name, required: false, type: :string, default: cfgs.stack_name
    option :service_name, required: true, type: :string
    option :binded_container_folders, required: true, type: :string, desc: "path della cartella bindata all'interno del container da sincronizzare"
    option :local_folder, required: false, type: :string, desc: "path della cartella dove sincronizzare il comando"

    # Rsyncs a local folder UP into the bind-mounted container folder, after
    # an explicit confirmation (remote data is overwritten).
    def rsync_binded_to
      if yes? "ATTENZIONE, i dati remoti verranno sovrascritti/cancellati da quelli locali?[y,yes]"
        rsync_binded(direction: :up, options: options)
      end
    end


    private

    # Shared implementation for rsync_binded_from / rsync_binded_to.
    # direction :down copies node -> local, :up copies local -> node.
    # Resolves the container, finds the bind-mounted volume matching
    # --binded-container-folders, builds the rsync command over the node's
    # SSH path, and executes it after a final confirmation.
    def rsync_binded(direction: :down, options: {})

      # locate the service's container
      container = Models::Container.find_by_service_name(options[:service_name], stack_name: options[:stack_name])

      if container.nil?
        say "Container non trovato con #{options[:stack_name]}@##{options[:service_name]}"
        exit 0
      end


      # find the binded volume and its corresponding folder on the node
      volume = container.mapped_volumes.find { |v| v.destination == options[:binded_container_folders] and v.is_binded? }
      if volume.nil?
        say "Non ho trovato il volume bindato con questa destinazione all'interno del container #{options[:binded_container_folders]}"
        exit 0
      end

      # build the rsync command between the node folder and the local folder
      cmd = ["rsync", "-zr", "--delete"]
      if direction == :down
        cmd << "#{volume.ssh_connection_path}/."
        # create the local folder if it does not exist
        FileUtils.mkdir_p(options[:local_folder])
        cmd << options[:local_folder]
      end
      if direction == :up
        cmd << "#{options[:local_folder]}/."
        cmd << volume.ssh_connection_path
      end

      cmd = ShellCommandExecution.new(cmd)

      say "Comando da eseguire:"
      say " #{cmd.string_command}"
      if yes?("Confermare il comando?[y,yes]")
        cmd.execute
      end
    end
  end
end
|