swarm_cluster_cli_ope 0.5.0.pre.5 → 0.5.4

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: b3ef506ff18497d7a1e4d7e8641d439f006d5a4efd6065436e4f9ab3ff9a251c
-  data.tar.gz: bbce35bf210097b7e2bb2d8a973ec345f2dd9cea786536947ddb88bd9baf0aef
+  metadata.gz: ec7a0b3dd9f3253a1e18bcb091b4e31a5de19b6304a91fe77ed4d704c9a9f109
+  data.tar.gz: b552969314967d155c2ddba35de7f4f809b5905830c1fb2edc20a0542879005f
 SHA512:
-  metadata.gz: aa6b8661174a5b5c4961baf2c973c202b15b29ef25e5e796548c902339626d53aea85f6a70c83302127a4864ff868588d0c9b36431d9db71dd1c4626baa8482d
-  data.tar.gz: 6d81962999b8b01337c03c299db875264dde14bdf2b093448ed47e7f0f0dc51bc53fcdc70086975f0e66c6efee2ccfd5545a669e29d03a499fc1a88ab2ed9460
+  metadata.gz: 64f80000c7b36cd5ef521f41939fb93060b3e94520081b33522d44ecf52cba7754022881089a981a206b230287289e19735efd9b6fdcb1acdb60e26563c2f14a
+  data.tar.gz: e7943eaad259b10bc0e3c6879aca2910d6a864e843d0235844eca2ef90c10c8ba7fa671b1fa2bbcbdec5041a0946abae4d96b6a49d311636f8a52dd5717a7a7a
@@ -1,5 +1,11 @@
 ## Changelog
 
+# 0.5.4
+- fixed permissions bug on the rsync password file
+
+# 0.5.3
+- fixed pod selection bug, it now filters only for containers that are running
+
 # 0.4
 - push/pull implementation with the pg **stacksync** command
 - version check of the uploaded file against the gem, with a resulting warning
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    swarm_cluster_cli_ope (0.5.0.pre.5)
+    swarm_cluster_cli_ope (0.5.4)
       activesupport
       open4
       thor (~> 1.0)
@@ -10,23 +10,22 @@ PATH
 GEM
   remote: https://rubygems.org/
   specs:
-    activesupport (6.0.3.4)
+    activesupport (6.1.1)
       concurrent-ruby (~> 1.0, >= 1.0.2)
-      i18n (>= 0.7, < 2)
-      minitest (~> 5.1)
-      tzinfo (~> 1.1)
-      zeitwerk (~> 2.2, >= 2.2.2)
-    concurrent-ruby (1.1.7)
-    i18n (1.8.5)
+      i18n (>= 1.6, < 2)
+      minitest (>= 5.1)
+      tzinfo (~> 2.0)
+      zeitwerk (~> 2.3)
+    concurrent-ruby (1.1.8)
+    i18n (1.8.7)
       concurrent-ruby (~> 1.0)
-    minitest (5.14.2)
+    minitest (5.14.3)
     open4 (1.3.4)
     rake (12.3.3)
-    thor (1.0.1)
-    thread_safe (0.3.6)
-    tzinfo (1.2.8)
-      thread_safe (~> 0.1)
-    zeitwerk (2.4.1)
+    thor (1.1.0)
+    tzinfo (2.0.4)
+      concurrent-ruby (~> 1.0)
+    zeitwerk (2.4.2)
 
 PLATFORMS
   ruby
data/README.md CHANGED
@@ -211,7 +211,11 @@ docker-compose -f test_folder/test_1/docker-compose-local.yml up -d
 For Kubernetes we need to have minikube installed.
 Then launch the test environment:
 
+```shell script
 kubectl apply -f test_folder/test_k8s/test.yaml
+docker-compose -f test_folder/test_k8s/docker-compose-local.yml up -d
+```
+
 
 To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version
 number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git
@@ -2,7 +2,15 @@ require "zeitwerk"
 loader = Zeitwerk::Loader.for_gem
 loader.setup # ready!
 
+require "active_support/core_ext/module/attribute_accessors"
 module SwarmClusterCliOpe
   class Error < StandardError; end
-  # Your code goes here...
+
+
+  ##
+  # The configuration that is made available to all functions after interacting with the configuration
+  # concern or with the configuration block of a given environment
+  mattr_accessor :current_configuration
+  @@current_configuration = nil
+
 end
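
The `mattr_accessor :current_configuration` added above comes from ActiveSupport, pulled in by the new `require`. A minimal standalone sketch (hypothetical `DemoOpe` module, not the gem's code) of what that accessor provides:

```ruby
# Sketch only: a module-level accessor holding a shared configuration object,
# assuming the activesupport gem is installed.
require "active_support/core_ext/module/attribute_accessors"

module DemoOpe
  # defines DemoOpe.current_configuration and DemoOpe.current_configuration=
  mattr_accessor :current_configuration
  @@current_configuration = nil
end

DemoOpe.current_configuration = { environment: :staging }
puts DemoOpe.current_configuration.inspect # => {:environment=>:staging}
```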
@@ -137,6 +137,8 @@ module SwarmClusterCliOpe
         exit
       end
 
+      evaluate_correct_command_usage(@_merged_configurations[@environment])
+
       @_merged_configurations[@environment]
 
     end
@@ -170,9 +172,22 @@ module SwarmClusterCliOpe
       end
     end
 
-
+    ##
+    # Name of the local compose project, the part of the name that is attached in front
+    # of every local service name, and which by default is the name of the folder in which
+    # the docker-compose.yml file lives
+    # @return [String]
+    def local_compose_project_name
+      File.basename(FileUtils.pwd).downcase
+    end
 
     private
+
+    ##
+    # Checks whether we are in the correct configuration class, and therefore in the correct
+    # set of configuration commands. It prevents running k8s with the old settings, or vice versa.
+    def evaluate_correct_command_usage(configuration) end
+
     ##
     # name of the file in which to save the project configurations
     # @return [String]
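
The `local_compose_project_name` moved into this base class mirrors docker-compose's default project name, which is the lowercased name of the working directory. A tiny standalone sketch (the `<project>_<service>_<index>` container naming in the comment assumes compose v1 conventions):

```ruby
# Sketch only: the default compose project name is the lowercased current directory name.
require "fileutils"

project = File.basename(FileUtils.pwd).downcase
puts project
# e.g. in ~/Projects/MyApp this prints "myapp", so service "web" runs as container "myapp_web_1"
```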
@@ -79,17 +79,20 @@ module SwarmClusterCliOpe
       nodes.find { |c| c.id == node_id }
     end
 
-    ##
-    # Name of the local compose project, the part of the name that is attached in front
-    # of every local service name, and which by default is the name of the folder in which
-    # the docker-compose.yml file lives
-    # @return [String]
-    def local_compose_project_name
-      File.basename(FileUtils.pwd).downcase
-    end
 
     private
 
+
+    def evaluate_correct_command_usage(configuration)
+
+      if configuration[:connections_maps].keys.include?(:context)
+        puts "ATTENZIONE, I COMANDI DEVONO ESSERE LANCIATI DAL SUB COMANDO K8S"
+        exit
+      end
+
+    end
+
+
     ##
     # Path to the file where we store the managers cache; it has a TTL tied to the time (year-month-day-hour),
     # so it cleans itself every hour, plus an md5 of the base configurations
@@ -8,7 +8,7 @@ module SwarmClusterCliOpe
 
     # @return [SwarmClusterCliOpe::Configuration]
     def cfgs
-      self.class.cfgs
+      SwarmClusterCliOpe.current_configuration ||= self.class.cfgs
     end
 
   end
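
With this change `cfgs` memoizes into the module-level accessor, so every concern that calls it shares a single configuration object. A standalone sketch (hypothetical `Registry` module and `expensive_build` helper, not the gem's classes) of the `||=` pattern:

```ruby
# Sketch only: ||= assigns only when the stored value is nil/false,
# so the configuration is built once and then reused by every caller.
module Registry
  class << self
    attr_accessor :current_configuration
  end
end

def expensive_build
  puts "building configuration"
  { logger_level: "1" }
end

def cfgs
  Registry.current_configuration ||= expensive_build
end

cfgs # prints "building configuration" and caches the hash
cfgs # returns the cached hash, prints nothing
```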
@@ -48,16 +48,27 @@ module SwarmClusterCliOpe
             SyncConfigs::Sqlite3
           when 'rsync'
             SyncConfigs::Rsync
-          # when 'mysql'
-          #   SyncConfigs::Mysql
-          # when 'pg'
-          #   SyncConfigs::PostGres
+          when 'mysql'
+            SyncConfigs::Mysql
+          when 'pg'
+            SyncConfigs::PostGres
           else
             logger.error { "CONFIGURAIONE NON PREVISTA: #{name}" }
             nil
           end
         end
 
+      private
+
+      def evaluate_correct_command_usage(configuration)
+
+        unless configuration[:connections_maps].keys.include?(:context)
+          puts "ATTENZIONE, I COMANDI NON DEVONO ESSERE LANCIATI DAL SUB COMANDO K8S"
+          exit
+        end
+
+      end
+
 
     end
   end
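
Both `evaluate_correct_command_usage` overrides (the Swarm one earlier in the diff and the Kubernetes one above) key off the same detail: a Kubernetes-style configuration stores a `:context` entry in `connections_maps`. A standalone sketch (hypothetical hashes, not the gem's configuration objects):

```ruby
# Sketch only: telling the two configuration flavours apart by the :context key.
def k8s_configuration?(configuration)
  configuration[:connections_maps].keys.include?(:context)
end

swarm_cfg = { connections_maps: { production: "ssh://manager-node" } }
k8s_cfg   = { connections_maps: { context: "minikube" } }

puts k8s_configuration?(swarm_cfg) # => false -> rejected under the k8s sub-command
puts k8s_configuration?(k8s_cfg)   # => true  -> rejected under the plain (Swarm) commands
```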
@@ -30,9 +30,9 @@ module SwarmClusterCliOpe
       end
 
       # @param [String,Array<String>] cmd -> command passed to kubectl exec -- CMD
-      # @return [SwarmClusterCliOpe::ShellCommandExecution]
+      # @return [SwarmClusterCliOpe::ShellCommandResponse]
       def exec(cmd)
-        base_cmd(["exec", name, "--", cmd].flatten)
+        base_cmd(["exec", name, "--", cmd].flatten).execute
      end
 
       ##
@@ -90,6 +90,7 @@ module SwarmClusterCliOpe
         base_cmd << "--context=#{context}" unless context.blank?
         base_cmd << "get pod"
         base_cmd << "--selector=#{selector}"
+        base_cmd << "--field-selector=status.phase=Running" # we only keep track of pods that are running
         base_cmd << "--output=json"
 
         cmd = ShellCommandExecution.new(base_cmd)
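
The one-line addition above is the 0.5.3 fix from the changelog: `kubectl get pod` is now restricted to running pods via a field selector. A standalone sketch (hypothetical helper, not the gem's `Pod` class) of the command line that ends up being built:

```ruby
# Sketch only: how the kubectl command is assembled; --field-selector is what
# restricts the listing to running pods.
def kubectl_get_running_pods(selector:, namespace: nil, context: nil)
  cmd = ["kubectl"]
  cmd << "--namespace=#{namespace}" if namespace
  cmd << "--context=#{context}" if context
  cmd << "get pod"
  cmd << "--selector=#{selector}"
  cmd << "--field-selector=status.phase=Running" # skip Pending/Succeeded/Failed pods
  cmd << "--output=json"
  cmd.join(" ")
end

puts kubectl_get_running_pods(selector: "app=web", context: "minikube")
# kubectl --context=minikube get pod --selector=app=web --field-selector=status.phase=Running --output=json
```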
@@ -0,0 +1,10 @@
+module SwarmClusterCliOpe
+  module Kubernetes
+    module SyncConfigs
+      class Mysql < SwarmClusterCliOpe::SyncConfigs::Mysql
+
+        include BaseDecorator
+      end
+    end
+  end
+end
@@ -0,0 +1,11 @@
+module SwarmClusterCliOpe
+  module Kubernetes
+    module SyncConfigs
+      class PostGres < SwarmClusterCliOpe::SyncConfigs::PostGres
+
+        include BaseDecorator
+
+      end
+    end
+  end
+end
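
The two new files above reuse the existing Swarm sync configs by subclassing them and mixing in `BaseDecorator`, so the Kubernetes flavour only overrides the container-access parts. A generic sketch of that pattern (hypothetical classes, not the gem's):

```ruby
# Sketch only: reusing an existing class under a new namespace by subclassing it
# and mixing in a module whose methods take precedence over the inherited ones.
module BaseBehaviour
  def run
    "talking to #{target}"
  end
end

class Mysql
  include BaseBehaviour

  def target
    "a docker service"
  end
end

module Kubernetes
  module Decorator
    def target
      "a kubernetes pod"
    end
  end

  class Mysql < ::Mysql
    include Decorator # the mixin wins over the inherited #target
  end
end

puts Mysql.new.run             # => talking to a docker service
puts Kubernetes::Mysql.new.run # => talking to a kubernetes pod
```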
@@ -47,22 +47,22 @@ module SwarmClusterCliOpe
       end
 
       cmd = container.exec(['bash -c "apt update && apt install -yq rsync psmisc"'])
-      if cmd.execute.failed?
+      if cmd.failed?
         puts "Problemi nell'installazione di rsync nel pod"
       else
-        cmd = container.cp_in(File.expand_path("../../rsync_cfgs/rsyncd.conf", __FILE__), "/tmp/.")
+        cmd = container.cp_in(configs_path("rsyncd.conf"), "/tmp/.")
         copy_1 = cmd.execute.failed?
-        cmd = container.cp_in(File.expand_path("../../rsync_cfgs/rsyncd.secrets", __FILE__), "/tmp/.")
+        cmd = container.cp_in(configs_path("rsyncd.secrets"), "/tmp/.")
         copy_2 = cmd.execute.failed?
         cmd = container.exec(['bash -c "chmod 600 /tmp/rsyncd.secrets && chown root /tmp/*"'])
-        chmod = cmd.execute.failed?
+        chmod = cmd.failed?
         if copy_1 or copy_2 or chmod
           puts "problema nella copia dei file di configurazione nel pod"
         else
 
           begin
             cmd = container.exec('bash -c "rsync --daemon --config=/tmp/rsyncd.conf --verbose --log-file=/tmp/rsync.log"')
-            if cmd.execute.failed?
+            if cmd.failed?
               say "Rsync non Inizializzato"
             else
               begin
@@ -76,11 +76,15 @@ module SwarmClusterCliOpe
 
                 sleep 1
 
+                # force the permissions on the password file
+                cmd = ShellCommandExecution.new(["chmod 600 #{ configs_path("password")}"])
+                cmd.execute
+
                 # then we launch the command that performs the rsync
                 rsync_command = [
                   "rsync -az --no-o --no-g",
                   "--delete",
-                  "--password-file=#{ File.expand_path("../../rsync_cfgs/password", __FILE__)}"
+                  "--password-file=#{ configs_path("password")}"
                 ]
 
                 if direction == :up
@@ -102,15 +106,13 @@ module SwarmClusterCliOpe
                   end
                 ensure
                   say "Tolgo il servizio di rsyn"
-                  cmd = container.exec('bash -c "killall rsync"')
-                  cmd.execute
+                  container.exec('bash -c "killall rsync"')
                 end
               end
 
           ensure
             say "Eseguo pulizia configurazioni caricate"
-            cmd = container.exec('bash -c "rm -fr /tmp/rsyncd*"')
-            cmd.execute
+            container.exec('bash -c "rm -fr /tmp/rsyncd*"')
           end
 
         end
@@ -121,6 +123,14 @@ module SwarmClusterCliOpe
 
       end
 
+      ##
+      # Resolves the path to the given configuration file
+      # @param [String] file
+      # @return [String]
+      def configs_path(file)
+        File.expand_path("../../rsync_cfgs/#{file}", __FILE__)
+      end
+
     end
   end
 end
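
The `chmod 600` added above is the 0.5.4 changelog fix: rsync refuses a `--password-file` that is readable by group or others. A standalone sketch (hypothetical temp path) of the same precaution done directly from Ruby:

```ruby
# Sketch only: force 0600 on the password file before building the rsync command,
# same effect as shelling out to `chmod 600 ...`.
require "fileutils"
require "tmpdir"

password_file = File.join(Dir.mktmpdir, "password")
File.write(password_file, "secret\n")

FileUtils.chmod(0o600, password_file)

rsync_command = [
  "rsync -az --no-o --no-g",
  "--delete",
  "--password-file=#{password_file}"
].join(" ")
puts rsync_command
```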
@@ -6,7 +6,7 @@ module SwarmClusterCliOpe
       return LoggerConcern.const_get("LOGGER") if LoggerConcern.const_defined?("LOGGER")
       logger = Logger.new(STDOUT)
       LoggerConcern.const_set("LOGGER", logger)
-      logger.level = case Configuration.instance.logger_level
+      logger.level = case BaseConfiguration.instance.logger_level
                      when "0"
                        Logger::ERROR
                      when "1"
@@ -102,6 +102,13 @@ module SwarmClusterCliOpe
       else
         raise "ONLY [push|pull] action accepted"
       end
+
+      if direction == :push
+        unless yes? "ATTENZIONE STAI FACENDO PUSH, proseguire????[y,yes]"
+          exit "OK, CIAO"
+        end
+      end
+
       cfgs.env(options[:environment]) do |cfgs|
         sync_cfgs = cfgs.sync_configurations
         if sync_cfgs.empty?
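
The new guard above makes a destructive `push` ask for confirmation through Thor's `yes?`. A minimal runnable sketch of the same guard in a stand-alone Thor CLI (hypothetical `DemoCli`, assuming the thor gem is available):

```ruby
# Sketch only: anything other than y/yes at the prompt aborts the push.
require "thor"

class DemoCli < Thor
  desc "stacksync DIRECTION", "sync data (push|pull)"
  def stacksync(direction)
    if direction.to_sym == :push
      unless yes?("You are about to PUSH over the remote data, continue? [y,yes]")
        say "Aborted"
        exit 1
      end
    end
    say "running #{direction}..."
  end
end

DemoCli.start(ARGV) if $PROGRAM_NAME == __FILE__
```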
@@ -7,9 +7,24 @@ module SwarmClusterCliOpe
       resume('pull')
       if yes?("Confermare il comando?[y,yes]")
         tmp_file = "/tmp/#{Time.now.to_i}.sql.gz"
-        container.exec("bash -c 'mysqldump -u #{remote.username} --password=#{remote.password} #{remote.database_name} | gzip -c -f' > #{tmp_file}")
+        container.exec("bash -c 'mysqldump -u #{remote.username} --password=#{remote.password} #{remote.database_name} | gzip -c -f' > #{tmp_file}")
         local_container.copy_in(tmp_file, tmp_file)
-        local_container.exec("bash -c 'zcat #{tmp_file} | mysql -u #{local.username} --password=#{local.password} #{local.database_name}'")
+        local_authentication = "-u #{local.username} --password=#{local.password}"
+
+        command = []
+        command << "bash -c '"
+
+        command << "mysql #{local_authentication} -e \"DROP DATABASE IF EXISTS #{local.database_name};CREATE DATABASE #{local.database_name}\""
+
+        command << "&&"
+
+        command << "zcat #{tmp_file}"
+        command << "|"
+        command << "mysql #{local_authentication} #{local.database_name}"
+
+        command << "'"
+
+        local_container.exec(command.join" ")
       end
       true
     end
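
The rebuilt pull above now drops and recreates the local database before loading the dump, instead of piping straight into `mysql`. A standalone sketch (hypothetical credentials and file name) showing the string the joined pieces produce:

```ruby
# Sketch only: what the assembled restore command looks like once joined with spaces.
local = Struct.new(:username, :password, :database_name).new("root", "root", "app_development")
tmp_file = "/tmp/1611900000.sql.gz"
auth = "-u #{local.username} --password=#{local.password}"

command = []
command << "bash -c '"
command << "mysql #{auth} -e \"DROP DATABASE IF EXISTS #{local.database_name};CREATE DATABASE #{local.database_name}\""
command << "&&"
command << "zcat #{tmp_file}"
command << "|"
command << "mysql #{auth} #{local.database_name}"
command << "'"

puts command.join(" ")
# bash -c ' mysql -u root --password=root -e "DROP DATABASE IF EXISTS app_development;CREATE DATABASE app_development" && zcat /tmp/1611900000.sql.gz | mysql -u root --password=root app_development '
```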
@@ -2,7 +2,6 @@ module SwarmClusterCliOpe
   module SyncConfigs
     class PostGres < BaseDatabase
 
-
       def pull
         resume('pull')
 
@@ -13,11 +12,11 @@ module SwarmClusterCliOpe
         local.container.copy_in(tmp_file, tmp_file)
 
         # drop old db and recreate
-        if Gem::Version.new(local.database_version) <= Gem::Version.new("12")
-          close_connections_and_drop_cmd(local)
-        else
-          raise "DA ANALIZZARE QUANDO LA 13 disponibile....dropdb ha un force come parametro"
-        end
+        # if Gem::Version.new(local.database_version) <= Gem::Version.new("12")
+        close_connections_and_drop_cmd(local)
+        # else
+        #   raise "DA ANALIZZARE QUANDO LA 13 disponibile....dropdb ha un force come parametro"
+        # end
 
         create_cmd(local)
@@ -37,12 +36,8 @@ module SwarmClusterCliOpe
         dump_cmd(local, tmp_file)
         remote.container.copy_in(tmp_file, tmp_file)
 
-        # drop old db and recreate
-        if Gem::Version.new(local.database_version) <= Gem::Version.new("12")
-          close_connections_and_drop_cmd(remote)
-        else
-          raise "DA ANALIZZARE QUANDO LA 13 disponibile....dropdb ha un force come parametro"
-        end
+        close_connections_and_drop_cmd(remote)
+
         create_cmd(remote)
 
         restore_cmd(remote, tmp_file)
@@ -125,23 +120,31 @@ module SwarmClusterCliOpe
 
       end
 
-
       # @param [EnvConfigs] config
       def close_connections_and_drop_cmd(config)
+
         cmd = []
-        cmd << "PGPASSWORD=\"#{config.password}\""
 
-        sql = []
-        sql << "SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = '\"'\"'#{config.database_name}'\"'\"' AND pid <> pg_backend_pid();;"
-        sql << "DROP DATABASE IF EXISTS #{config.database_name};"
+        if Gem::Version.new(config.database_version) >= Gem::Version.new("13")
+          cmd << "export PGPASSWORD=\"#{config.password}\" &&"
+          cmd << 'dropdb --force --if-exists'
+          cmd << "-U #{config.username}"
+          cmd << config.database_name
 
-        cmd << "echo \"#{sql.join(" ")}\" "
-        cmd << '|'
-        cmd << 'psql'
-        cmd << "-U #{config.username}"
-        cmd << "postgres"
-        cmd
+        else
+          cmd << "export PGPASSWORD=\"#{config.password}\" &&"
 
+          sql = []
+          sql << "SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = '\"'\"'#{config.database_name}'\"'\"' AND pid <> pg_backend_pid();;"
+          sql << "DROP DATABASE IF EXISTS #{config.database_name};"
+
+          cmd << "echo \"#{sql.join(" ")}\" "
+          cmd << '|'
+          cmd << 'psql'
+          cmd << "-U #{config.username}"
+          cmd << "postgres"
+
+        end
         logger.info { "CLOSE CONNECTIONS COMMAND: #{cmd.join(' ')}" }
         config.container.exec("bash -c '#{cmd.join(' ')}'")
       end
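
The branch above picks the drop strategy by server version: PostgreSQL 13 introduced `dropdb --force`, while older servers need the `pg_terminate_backend` + `DROP DATABASE` SQL. A standalone sketch (hypothetical config struct, simplified quoting) of the same decision:

```ruby
# Sketch only: choose `dropdb --force` on PostgreSQL 13+, otherwise terminate
# the open backends and drop the database through psql.
Config = Struct.new(:username, :password, :database_name, :database_version)

def drop_command(config)
  if Gem::Version.new(config.database_version) >= Gem::Version.new("13")
    "export PGPASSWORD=\"#{config.password}\" && dropdb --force --if-exists -U #{config.username} #{config.database_name}"
  else
    sql = "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = '#{config.database_name}' AND pid <> pg_backend_pid(); " \
          "DROP DATABASE IF EXISTS #{config.database_name};"
    "export PGPASSWORD=\"#{config.password}\" && echo \"#{sql}\" | psql -U #{config.username} postgres"
  end
end

puts drop_command(Config.new("postgres", "root", "app_dev", "13.1"))
puts drop_command(Config.new("postgres", "root", "app_dev", "12.5"))
```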
@@ -153,7 +156,6 @@ module SwarmClusterCliOpe
       # PGPASSWORD='root' createdb -U root -h 0.0.0.0 -p 32790 development;
       # PGPASSWORD='root' psql -U root -h 0.0.0.0 -p 32790 -d development < ./cortobio_production_new_2020-09-10-171742.sql
 
-
     end
   end
 end
@@ -1,3 +1,3 @@
 module SwarmClusterCliOpe
-  VERSION = "0.5.0.pre.5"
+  VERSION = "0.5.4"
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: swarm_cluster_cli_ope
 version: !ruby/object:Gem::Version
-  version: 0.5.0.pre.5
+  version: 0.5.4
 platform: ruby
 authors:
 - Marino Bonetti
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2020-11-12 00:00:00.000000000 Z
+date: 2021-01-29 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: thor
@@ -105,6 +105,8 @@ files:
 - lib/swarm_cluster_cli_ope/kubernetes/rsync_cfgs/rsyncd.conf
 - lib/swarm_cluster_cli_ope/kubernetes/rsync_cfgs/rsyncd.secrets
 - lib/swarm_cluster_cli_ope/kubernetes/sync_configs/base_decorator.rb
+- lib/swarm_cluster_cli_ope/kubernetes/sync_configs/mysql.rb
+- lib/swarm_cluster_cli_ope/kubernetes/sync_configs/post_gres.rb
 - lib/swarm_cluster_cli_ope/kubernetes/sync_configs/rsync.rb
 - lib/swarm_cluster_cli_ope/kubernetes/sync_configs/sqlite3.rb
 - lib/swarm_cluster_cli_ope/logger_concern.rb
@@ -151,9 +153,9 @@ required_ruby_version: !ruby/object:Gem::Requirement
       version: 2.3.0
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
-  - - ">"
+  - - ">="
     - !ruby/object:Gem::Version
-      version: 1.3.1
+      version: '0'
 requirements: []
 rubygems_version: 3.0.8
 signing_key: