swarm_cluster_cli_ope 0.5.0.pre.3 → 0.5.2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 137adc8bf19db9527fc5fdf3ec3eb5c71f0efd8b553d540eac1007a5ed31999a
-  data.tar.gz: '09d17f52234efab31318398e9bf4a2d992af47fee5c9870373dba1a7de20c13c'
+  metadata.gz: 0c8d4d620c7c8929ca7606a553347601906ebdf99c13ac77c103ee94e4d46c74
+  data.tar.gz: cb045c6a4f21ba1482e79b1d810476677c509ac737cc3ef000bd68afb34ea5eb
 SHA512:
-  metadata.gz: 257e7b406f1242637fb93b96da32a1ea7b08c189554d5442803b29dc8c182f13d9036d294ec62a2b65b0a25cad896e9203f5d9eb18fb0cea0d37245365250214
-  data.tar.gz: e84a120a6953ea3a08a8a1bd4a2c684923547f6fc83f357a1419d2832ad8c25e4fdbcd39cf4008fb88976db74020a1d007088f396391015ff20eeae2a23d72e2
+  metadata.gz: 34772d9e2eb49263ec059a78bcbaa75629043f3d58b2c47c15c8061c430f995d6c5af6100b66c63781b17d450e012574f32a3c48228b24e2c51fc587904204e6
+  data.tar.gz: 1f59ee9abaf08937a09f26e6f0d7c77d88b595154aac0a0b9399299a4dc2bb7ffb59c7c5906d4fbcdf9d69967ccbc8765fee58e64334544f19a48267fb93d75e
Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    swarm_cluster_cli_ope (0.5.0.pre.3)
+    swarm_cluster_cli_ope (0.5.2)
       activesupport
       open4
       thor (~> 1.0)
data/README.md CHANGED
@@ -211,7 +211,11 @@ docker-compose -f test_folder/test_1/docker-compose-local.yml up -d
 For Kubernetes we need minikube installed.
 Then launch the test environment:
 
+```shell script
 kubectl apply -f test_folder/test_k8s/test.yaml
+docker-compose -f test_folder/test_k8s/docker-compose-local.yml up -d
+```
+
 
 To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version
 number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git
@@ -2,7 +2,15 @@ require "zeitwerk"
 loader = Zeitwerk::Loader.for_gem
 loader.setup # ready!
 
+require "active_support/core_ext/module/attribute_accessors"
 module SwarmClusterCliOpe
   class Error < StandardError; end
-  # Your code goes here...
+
+
+  ##
+  # The configuration made available to every function after interacting with the configuration
+  # concern or with the configuration block of a given environment
+  mattr_accessor :current_configuration
+  @@current_configuration = nil
+
 end
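
For reference, `mattr_accessor` (from the ActiveSupport extension required above) defines module-level reader and writer methods. A minimal sketch of how the new module-wide holder behaves; the `DemoOpe` module is illustrative, not part of the gem:

```ruby
require "active_support/core_ext/module/attribute_accessors"

module DemoOpe
  # Module-wide accessor: DemoOpe.current_configuration and DemoOpe.current_configuration=
  mattr_accessor :current_configuration
end

DemoOpe.current_configuration               # => nil until something sets it
DemoOpe.current_configuration = { environment: :staging }
DemoOpe.current_configuration[:environment] # => :staging
```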
@@ -137,6 +137,8 @@ module SwarmClusterCliOpe
       exit
     end
 
+    evaluate_correct_command_usage(@_merged_configurations[@environment])
+
     @_merged_configurations[@environment]
 
   end
@@ -170,9 +172,22 @@ module SwarmClusterCliOpe
     end
   end
 
-
+  ##
+  # Name of the local compose project: the piece of name prepended to every local service name,
+  # which by default is the name of the folder containing the docker-compose.yml file
+  # @return [String]
+  def local_compose_project_name
+    File.basename(FileUtils.pwd).downcase
+  end
 
   private
+
+  ##
+  # Checks that we are in the correct configuration class, and therefore in the correct set of
+  # configuration commands. Keeps k8s from running with the old settings and vice versa
+  def evaluate_correct_command_usage(configuration) end
+
   ##
   # Name of the file in which the project configurations are saved
   # @return [String]
@@ -79,17 +79,20 @@ module SwarmClusterCliOpe
     nodes.find { |c| c.id == node_id }
   end
 
-  ##
-  # Name of the local compose project: the piece of name prepended to every local service name,
-  # which by default is the name of the folder containing the docker-compose.yml file
-  # @return [String]
-  def local_compose_project_name
-    File.basename(FileUtils.pwd).downcase
-  end
 
   private
 
+
+  def evaluate_correct_command_usage(configuration)
+
+    if configuration[:connections_maps].keys.include?(:context)
+      puts "WARNING: THESE COMMANDS MUST BE RUN FROM THE K8S SUBCOMMAND"
+      exit
+    end
+
+  end
+
+
   ##
   # Path to the file where we cache the managers; it has a TTL tied to the time (year-month-day-hour),
   # so it cleans itself every hour, keyed with an md5 of the base configurations
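
`evaluate_correct_command_usage` is a template-method hook: the base configuration declares a no-op, and each concrete configuration overrides it to reject the other command set's settings. A condensed sketch of the pattern, with illustrative class names rather than the gem's own:

```ruby
class BaseCfg
  def merged_configurations(cfg)
    evaluate_correct_command_usage(cfg) # hook: each subclass validates its command set
    cfg
  end

  private

  # No-op in the base class; overridden by the concrete configurations.
  def evaluate_correct_command_usage(configuration) end
end

class SwarmCfg < BaseCfg
  private

  def evaluate_correct_command_usage(configuration)
    # A :context key marks a k8s configuration, so the swarm command set rejects it.
    abort "use the k8s subcommand" if configuration[:connections_maps].key?(:context)
  end
end

SwarmCfg.new.merged_configurations(connections_maps: {})                      # => passes through
SwarmCfg.new.merged_configurations(connections_maps: { context: "minikube" }) # aborts
```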
@@ -8,7 +8,7 @@ module SwarmClusterCliOpe
 
   # @return [SwarmClusterCliOpe::Configuration]
   def cfgs
-    self.class.cfgs
+    SwarmClusterCliOpe.current_configuration ||= self.class.cfgs
   end
 
 end
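
The `||=` promotes the lazily built class-level configuration into the module-wide `current_configuration` on first call, so every later `cfgs` call returns the same object. The caching behaviour in isolation (names are illustrative):

```ruby
module Holder
  class << self
    attr_accessor :current_configuration
  end
end

def cfgs(builder)
  Holder.current_configuration ||= builder.call # built once, reused afterwards
end

first  = cfgs(-> { { loaded_at: Time.now } })
second = cfgs(-> { raise "never reached: the cached value wins" })
first.equal?(second) # => true
```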
@@ -2,14 +2,29 @@ module SwarmClusterCliOpe
   module Kubernetes
     class Configuration < BaseConfiguration
 
+      def shell
+        @_shell ||= Thor::Shell::Basic.new
+      end
+
+      delegate :yes?, to: :shell
 
       ##
       # In Kubernetes we have the context; it can come either from the configuration or from
       # kubelet's current_context
       # @return [String]
       def context
-        cmd = ShellCommandExecution.new(['kubectl config current-context'])
-        cmd.execute.raw_result[:stdout]
+
+        context = merged_configurations.dig(:connections_maps, :context) || nil
+
+        if context.nil?
+          cmd = ShellCommandExecution.new(['kubectl config current-context'])
+          context = cmd.execute.raw_result[:stdout]
+          unless yes? "Warning: no context was present in the configurations; using the one currently in use: #{context}. Continue anyway? [y,yes]"
+            exit
+          end
+
+        end
+        context
       end
 
       ##
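
The reworked `context` prefers the value stored under `connections_maps.context` and only shells out to `kubectl config current-context` as a confirmed fallback. Roughly, with the gem's plumbing replaced by plain Ruby (`kubectl` on the PATH is assumed; the `yes?` prompt comes from Thor):

```ruby
require "thor"

def resolve_context(merged_configurations)
  context = merged_configurations.dig(:connections_maps, :context)
  return context if context

  current = `kubectl config current-context`.strip
  shell = Thor::Shell::Basic.new
  exit unless shell.yes?("No context in the configurations; use the current one (#{current})? [y,yes]")
  current
end

resolve_context(connections_maps: { context: "minikube" }) # => "minikube", no prompt
```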
@@ -33,16 +48,27 @@ module SwarmClusterCliOpe
           SyncConfigs::Sqlite3
         when 'rsync'
           SyncConfigs::Rsync
-        # when 'mysql'
-        #   SyncConfigs::Mysql
-        # when 'pg'
-        #   SyncConfigs::PostGres
+        when 'mysql'
+          SyncConfigs::Mysql
+        when 'pg'
+          SyncConfigs::PostGres
         else
           logger.error { "UNSUPPORTED CONFIGURATION: #{name}" }
           nil
         end
       end
 
+      private
+
+      def evaluate_correct_command_usage(configuration)
+
+        unless configuration[:connections_maps].keys.include?(:context)
+          puts "WARNING: THESE COMMANDS MUST NOT BE RUN FROM THE K8S SUBCOMMAND"
+          exit
+        end
+
+      end
+
 
     end
   end
@@ -30,9 +30,9 @@ module SwarmClusterCliOpe
     end
 
     # @param [String,Array<String>] cmd -> command passed to kubectl exec -- CMD
-    # @return [SwarmClusterCliOpe::ShellCommandExecution]
+    # @return [SwarmClusterCliOpe::ShellCommandResponse]
     def exec(cmd)
-      base_cmd(["exec", name, "--", cmd].flatten)
+      base_cmd(["exec", name, "--", cmd].flatten).execute
     end
 
     ##
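
`Pod#exec` now runs the command itself and hands back the response, so call sites drop their own `.execute` (as the rsync hunks below do). The before/after shape, with stand-ins for the gem's command and response classes:

```ruby
# Illustrative stand-ins, not the gem's real classes.
Response = Struct.new(:exitstatus) do
  def failed?
    !exitstatus.zero?
  end
end

class Command
  def initialize(cmd)
    @cmd = cmd
  end

  def execute
    Response.new(system(@cmd) ? 0 : 1)
  end
end

def exec(cmd)
  Command.new(cmd).execute # executed eagerly, as Pod#exec now does
end

exec("true").failed? # => false; previously the caller had to call .execute first
```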
@@ -98,10 +98,11 @@ module SwarmClusterCliOpe
         puts "Problems finding the pod"
         exit
       else
-        if ris.result[:items].empty?
-          logger.warn { "pod not found" }
+        if ris.single_obj[:items].empty?
+          puts "pod not found"
+          exit
         else
-          self.new(ris.result[:items].first, context: context)
+          self.new(ris.single_obj[:items].first, context: context)
         end
       end
     end
@@ -119,7 +120,7 @@ module SwarmClusterCliOpe
       puts "Problems finding the pod"
       exit
     else
-      self.new(ris.result, context: context)
+      self.new(ris.single_obj, context: context)
     end
   end
 
lib/swarm_cluster_cli_ope/kubernetes/sync_configs/mysql.rb ADDED
@@ -0,0 +1,10 @@
+module SwarmClusterCliOpe
+  module Kubernetes
+    module SyncConfigs
+      class Mysql < SwarmClusterCliOpe::SyncConfigs::Mysql
+
+        include BaseDecorator
+      end
+    end
+  end
+end
lib/swarm_cluster_cli_ope/kubernetes/sync_configs/post_gres.rb ADDED
@@ -0,0 +1,11 @@
+module SwarmClusterCliOpe
+  module Kubernetes
+    module SyncConfigs
+      class PostGres < SwarmClusterCliOpe::SyncConfigs::PostGres
+
+        include BaseDecorator
+
+      end
+    end
+  end
+end
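
Both new files follow the recipe already used by the Rsync and Sqlite3 k8s variants: inherit the swarm implementation and mix in `BaseDecorator`, which then sits before the swarm class in the ancestor chain and shadows its container plumbing. The lookup-order trick in miniature (all names here are illustrative, not the gem's internals):

```ruby
class SwarmMysql
  def pull
    "pull via #{transport}"
  end

  def transport
    "docker service"
  end
end

module PodDecorator
  # Included in the subclass, this is found before SwarmMysql during method lookup.
  def transport
    "kubectl pod"
  end
end

class K8sMysql < SwarmMysql
  include PodDecorator
end

K8sMysql.new.pull # => "pull via kubectl pod"
```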
@@ -47,7 +47,7 @@ module SwarmClusterCliOpe
   end
 
   cmd = container.exec(['bash -c "apt update && apt install -yq rsync psmisc"'])
-  if cmd.execute.failed?
+  if cmd.failed?
     puts "Problems installing rsync in the pod"
   else
     cmd = container.cp_in(File.expand_path("../../rsync_cfgs/rsyncd.conf", __FILE__), "/tmp/.")
@@ -55,54 +55,60 @@ module SwarmClusterCliOpe
       cmd = container.cp_in(File.expand_path("../../rsync_cfgs/rsyncd.secrets", __FILE__), "/tmp/.")
       copy_2 = cmd.execute.failed?
       cmd = container.exec(['bash -c "chmod 600 /tmp/rsyncd.secrets && chown root /tmp/*"'])
-      chmod = cmd.execute.failed?
+      chmod = cmd.failed?
       if copy_1 or copy_2 or chmod
         puts "problem copying the configuration files into the pod"
       else
 
-
-        cmd = container.exec('bash -c "rsync --daemon --config=/tmp/rsyncd.conf --verbose --log-file=/tmp/rsync.log"')
-        if cmd.execute.failed?
-          say "Rsync not initialized"
-        else
-          local_port = rand(30000..40000)
-
-          p = IO.popen(container.base_cmd("port-forward #{podname} #{local_port}:873").string_command)
-          pid = p.pid
-          say "Port forward running with PID: #{pid}"
-
-          sleep 1
-
-          # then run the command that performs the rsync
-          rsync_command = [
-            "rsync -az --no-o --no-g",
-            "--delete",
-            "--password-file=#{File.expand_path("../../rsync_cfgs/password", __FILE__)}"
-          ]
-
-          if direction == :up
-            rsync_command << "#{local_folder}/."
-            rsync_command << "rsync://root@0.0.0.0:#{local_port}/archives#{remote_folder}"
+        begin
+          cmd = container.exec('bash -c "rsync --daemon --config=/tmp/rsyncd.conf --verbose --log-file=/tmp/rsync.log"')
+          if cmd.failed?
+            say "Rsync not initialized"
           else
-            rsync_command << "rsync://root@0.0.0.0:#{local_port}/archives#{remote_folder}/."
-            rsync_command << local_folder
+            begin
+              local_port = rand(30000..40000)
+
+              p = IO.popen(container.base_cmd("port-forward #{podname} #{local_port}:873").string_command)
+              pid = p.pid
+              say "Port forward running with PID: #{pid}"
+
+              begin
+
+                sleep 1
+
+                # then run the command that performs the rsync
+                rsync_command = [
+                  "rsync -az --no-o --no-g",
+                  "--delete",
+                  "--password-file=#{File.expand_path("../../rsync_cfgs/password", __FILE__)}"
+                ]
+
+                if direction == :up
+                  rsync_command << "#{local_folder}/."
+                  rsync_command << "rsync://root@0.0.0.0:#{local_port}/archives#{remote_folder}"
+                else
+                  rsync_command << "rsync://root@0.0.0.0:#{local_port}/archives#{remote_folder}/."
+                  rsync_command << local_folder
+                end
+                say "Running rsync #{rsync_command.join(" ")}"
+
+                cmd = ShellCommandExecution.new(rsync_command)
+                cmd.execute
+
+              ensure
+                sleep 1
+                say "Stopping the forwarded port"
+                Process.kill("INT", pid)
+              end
+            ensure
+              say "Removing the rsync service"
+              container.exec('bash -c "killall rsync"')
+            end
           end
-          say "Running rsync #{rsync_command.join(" ")}"
-
-
-          cmd = ShellCommandExecution.new(rsync_command)
-          cmd.execute
-
-          sleep 1
-          Process.kill("INT", pid)
-
-
-          say "Cleaning up"
-          cmd = container.exec('bash -c "killall rsync"')
-          cmd.execute
-          cmd = container.exec('bash -c "rm -fr /tmp/rsyncd*"')
-          cmd.execute
 
+      ensure
+        say "Cleaning up the uploaded configuration files"
+        container.exec('bash -c "rm -fr /tmp/rsyncd*"')
       end
 
     end
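
The rewrite nests `begin`/`ensure` so that each acquired resource is released even if an inner step raises: the rsync run always stops the port-forward, the surrounding block always kills the remote daemon, and the outermost `ensure` always deletes the uploaded `/tmp/rsyncd*` files. The skeleton of that guarantee, with the gem calls stubbed out:

```ruby
def start_port_forward; 4242; end          # stands in for IO.popen(...).pid
def run_rsync; raise "network hiccup"; end # any failure in the middle

def sync
  pid = start_port_forward
  begin
    run_rsync
  ensure
    puts "Process.kill INT #{pid}" # runs even though run_rsync raised
  end
ensure
  puts "killall rsync"             # the daemon is always stopped...
  puts "rm -fr /tmp/rsyncd*"       # ...and the uploaded configs always removed
end

begin
  sync
rescue RuntimeError
  # all three cleanup lines were printed before the error propagated here
end
```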
@@ -6,7 +6,7 @@ module SwarmClusterCliOpe
     return LoggerConcern.const_get("LOGGER") if LoggerConcern.const_defined?("LOGGER")
     logger = Logger.new(STDOUT)
     LoggerConcern.const_set("LOGGER", logger)
-    logger.level = case Configuration.instance.logger_level
+    logger.level = case BaseConfiguration.instance.logger_level
                    when "0"
                      Logger::ERROR
                    when "1"
@@ -1,8 +1,8 @@
 require 'forwardable'
 
 module SwarmClusterCliOpe
-##
-# Identifies a response from the shell
+  ##
+  # Identifies a response from the shell
   class ShellCommandResponse
     extend Forwardable
     include LoggerConcern
@@ -28,18 +28,24 @@ module SwarmClusterCliOpe
     ##
     # The result is always made up of a list of rows in JSON format, so we return an array of JSON objects
     # @param [Object] object_class
-    # @return [Array<object_class>]
-    def result(object_class: OpenStruct)
+    # @return [Array<object_class>,Object]
+    def result(object_class: OpenStruct, single: false)
       # first try to extract directly as JSON, and only then as an array
-      begin
+      if single
         # this is for k8s, since the response is a real JSON document
-        object_class.new(JSON.parse( raw_result[:stdout]))
-      rescue
+        object_class.new(JSON.parse(raw_result[:stdout]))
+      else
         # this is for swarm, which returns an array of JSON rows
         raw_result[:stdout].split("\n").collect { |s| object_class.new(JSON.parse(s)) }
       end
     end
 
+    # @param [Class<OpenStruct>] object_class
+    # @return [Object]
+    def single_obj(object_class: OpenStruct)
+      result(object_class: object_class, single: true)
+    end
+
     #
     # def to_a
     #   raw_result[:stdout].split("\n")
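
The split exists because `kubectl ... -o json` prints one JSON document, while the swarm CLIs print newline-delimited JSON, one object per line. `single:` picks the parse strategy, and `single_obj` is the k8s-flavoured shortcut. For example:

```ruby
require "json"
require "ostruct"

k8s_stdout   = '{"items":[{"name":"pod-a"}]}' # one whole document
swarm_stdout = %({"ID":"a1"}\n{"ID":"b2"})    # one object per line

single = OpenStruct.new(JSON.parse(k8s_stdout))
list   = swarm_stdout.split("\n").collect { |s| OpenStruct.new(JSON.parse(s)) }

single[:items].first["name"] # => "pod-a"
list.map(&:ID)               # => ["a1", "b2"]
```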
@@ -102,6 +102,14 @@ module SwarmClusterCliOpe
     else
       raise "ONLY [push|pull] action accepted"
     end
+
+    if direction == :push
+      unless yes? "WARNING: you are about to PUSH, continue? [y,yes]"
+        puts "OK, bye"
+        exit
+      end
+    end
+
     cfgs.env(options[:environment]) do |cfgs|
       sync_cfgs = cfgs.sync_configurations
       if sync_cfgs.empty?
@@ -7,9 +7,24 @@ module SwarmClusterCliOpe
     resume('pull')
     if yes?("Confirm the command? [y,yes]")
       tmp_file = "/tmp/#{Time.now.to_i}.sql.gz"
-      container.exec("bash -c 'mysqldump -u #{remote.username} --password=#{remote.password} #{remote.database_name} | gzip -c -f' > #{tmp_file}")
+      container.exec("bash -c 'mysqldump -u #{remote.username} --password=#{remote.password} #{remote.database_name} | gzip -c -f' > #{tmp_file}")
       local_container.copy_in(tmp_file, tmp_file)
-      local_container.exec("bash -c 'zcat #{tmp_file} | mysql -u #{local.username} --password=#{local.password} #{local.database_name}'")
+      local_authentication = "-u #{local.username} --password=#{local.password}"
+
+      command = []
+      command << "bash -c '"
+
+      command << "mysql #{local_authentication} -e \"DROP DATABASE IF EXISTS #{local.database_name};CREATE DATABASE #{local.database_name}\""
+
+      command << "&&"
+
+      command << "zcat #{tmp_file}"
+      command << "|"
+      command << "mysql #{local_authentication} #{local.database_name}"
+
+      command << "'"
+
+      local_container.exec(command.join(" "))
     end
     true
   end
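
The local restore now drops and recreates the database before loading the dump, so tables removed upstream no longer linger locally. Joining the pieces shows the single bash invocation that reaches the container; the credentials and names below are placeholders:

```ruby
local_authentication = "-u root --password=secret"
database_name        = "app_development"
tmp_file             = "/tmp/1610000000.sql.gz"

command = []
command << "bash -c '"
command << "mysql #{local_authentication} -e \"DROP DATABASE IF EXISTS #{database_name};CREATE DATABASE #{database_name}\""
command << "&&"
command << "zcat #{tmp_file}"
command << "|"
command << "mysql #{local_authentication} #{database_name}"
command << "'"

puts command.join(" ")
# => bash -c ' mysql -u root --password=secret -e "DROP DATABASE IF EXISTS app_development;CREATE DATABASE app_development" && zcat /tmp/1610000000.sql.gz | mysql -u root --password=secret app_development '
```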
@@ -2,7 +2,6 @@ module SwarmClusterCliOpe
   module SyncConfigs
     class PostGres < BaseDatabase
 
-
       def pull
         resume('pull')
 
@@ -13,11 +12,11 @@ module SwarmClusterCliOpe
         local.container.copy_in(tmp_file, tmp_file)
 
         # drop old db and recreate
-        if Gem::Version.new(local.database_version) <= Gem::Version.new("12")
-          close_connections_and_drop_cmd(local)
-        else
-          raise "TO BE ANALYZED WHEN 13 becomes available....dropdb has a force parameter"
-        end
+        # if Gem::Version.new(local.database_version) <= Gem::Version.new("12")
+        close_connections_and_drop_cmd(local)
+        # else
+        #   raise "TO BE ANALYZED WHEN 13 becomes available....dropdb has a force parameter"
+        # end
 
         create_cmd(local)
@@ -37,12 +36,8 @@ module SwarmClusterCliOpe
         dump_cmd(local, tmp_file)
         remote.container.copy_in(tmp_file, tmp_file)
 
-        # drop old db and recreate
-        if Gem::Version.new(local.database_version) <= Gem::Version.new("12")
-          close_connections_and_drop_cmd(remote)
-        else
-          raise "TO BE ANALYZED WHEN 13 becomes available....dropdb has a force parameter"
-        end
+        close_connections_and_drop_cmd(remote)
+
         create_cmd(remote)
 
         restore_cmd(remote, tmp_file)
@@ -125,23 +120,31 @@ module SwarmClusterCliOpe
 
       end
 
-
       # @param [EnvConfigs] config
       def close_connections_and_drop_cmd(config)
+
         cmd = []
-        cmd << "PGPASSWORD=\"#{config.password}\""
 
-        sql = []
-        sql << "SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = '\"'\"'#{config.database_name}'\"'\"' AND pid <> pg_backend_pid();;"
-        sql << "DROP DATABASE IF EXISTS #{config.database_name};"
+        if Gem::Version.new(config.database_version) >= Gem::Version.new("13")
+          cmd << "export PGPASSWORD=\"#{config.password}\" &&"
+          cmd << 'dropdb --force --if-exists'
+          cmd << "-U #{config.username}"
+          cmd << config.database_name
 
-        cmd << "echo \"#{sql.join(" ")}\" "
-        cmd << '|'
-        cmd << 'psql'
-        cmd << "-U #{config.username}"
-        cmd << "postgres"
-        cmd
+        else
+          cmd << "export PGPASSWORD=\"#{config.password}\" &&"
 
+          sql = []
+          sql << "SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = '\"'\"'#{config.database_name}'\"'\"' AND pid <> pg_backend_pid();;"
+          sql << "DROP DATABASE IF EXISTS #{config.database_name};"
+
+          cmd << "echo \"#{sql.join(" ")}\" "
+          cmd << '|'
+          cmd << 'psql'
+          cmd << "-U #{config.username}"
+          cmd << "postgres"
+
+        end
         logger.info { "CLOSE CONNECTIONS COMMAND: #{cmd.join(' ')}" }
         config.container.exec("bash -c '#{cmd.join(' ')}'")
       end
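
PostgreSQL 13 introduced `dropdb --force`, which disconnects active sessions on its own; on older servers the code keeps the previous two-step route of terminating backends through `pg_terminate_backend` before the `DROP DATABASE`. The branch reduced to its decision:

```ruby
def drop_strategy(database_version)
  if Gem::Version.new(database_version) >= Gem::Version.new("13")
    "dropdb --force --if-exists"                          # v13+: the server evicts sessions itself
  else
    "psql: pg_terminate_backend(...), then DROP DATABASE" # pre-13 fallback
  end
end

drop_strategy("12.4") # => "psql: pg_terminate_backend(...), then DROP DATABASE"
drop_strategy("13.1") # => "dropdb --force --if-exists"
```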
@@ -153,7 +156,6 @@ module SwarmClusterCliOpe
       # PGPASSWORD='root' createdb -U root -h 0.0.0.0 -p 32790 development;
       # PGPASSWORD='root' psql -U root -h 0.0.0.0 -p 32790 -d development < ./cortobio_production_new_2020-09-10-171742.sql
 
-
     end
   end
 end
lib/swarm_cluster_cli_ope/version.rb CHANGED
@@ -1,3 +1,3 @@
 module SwarmClusterCliOpe
-  VERSION = "0.5.0.pre.3"
+  VERSION = "0.5.2"
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: swarm_cluster_cli_ope
 version: !ruby/object:Gem::Version
-  version: 0.5.0.pre.3
+  version: 0.5.2
 platform: ruby
 authors:
 - Marino Bonetti
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2020-11-12 00:00:00.000000000 Z
+date: 2021-01-08 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: thor
@@ -105,6 +105,8 @@ files:
 - lib/swarm_cluster_cli_ope/kubernetes/rsync_cfgs/rsyncd.conf
 - lib/swarm_cluster_cli_ope/kubernetes/rsync_cfgs/rsyncd.secrets
 - lib/swarm_cluster_cli_ope/kubernetes/sync_configs/base_decorator.rb
+- lib/swarm_cluster_cli_ope/kubernetes/sync_configs/mysql.rb
+- lib/swarm_cluster_cli_ope/kubernetes/sync_configs/post_gres.rb
 - lib/swarm_cluster_cli_ope/kubernetes/sync_configs/rsync.rb
 - lib/swarm_cluster_cli_ope/kubernetes/sync_configs/sqlite3.rb
 - lib/swarm_cluster_cli_ope/logger_concern.rb
@@ -151,9 +153,9 @@ required_ruby_version: !ruby/object:Gem::Requirement
       version: 2.3.0
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
-  - - ">"
+  - - ">="
     - !ruby/object:Gem::Version
-      version: 1.3.1
+      version: '0'
 requirements: []
 rubygems_version: 3.0.8
 signing_key: