swarm_cluster_cli_ope 0.4 → 0.5.0.pre.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,14 @@
1
+ syslog facility = local3
2
+ read only = no
3
+ list = yes
4
+ auth users = root
5
+ secrets file = /tmp/rsyncd.secrets
6
+ hosts allow = 0.0.0.0/0
7
+ uid = 0
8
+ gid = 0
9
+
10
+ [archives]
11
+ comment = archives goes here
12
+ path = /
13
+ uid = root
14
+ gid = root
@@ -0,0 +1,28 @@
1
+ require 'active_support/concern'
2
+ module SwarmClusterCliOpe
3
+ module Kubernetes
4
+ module SyncConfigs
5
+ module BaseDecorator
6
+ extend ActiveSupport::Concern
7
+
8
+ included do
9
+
10
+ delegate :namespace, :context, to: :@stack_cfgs
11
+
12
+ private
13
+
14
+ # @return [SwarmClusterCliOpe::Kubernetes::Pod]
15
+ def container
16
+ return @service if @service.is_a? SwarmClusterCliOpe::Kubernetes::Pod
17
+ @_container ||= Pod.find_by_selector(service, namespace: namespace, context: context)
18
+ end
19
+
20
+ end
21
+
22
+ # module ClassMethods
23
+
24
+ # end
25
+ end
26
+ end
27
+ end
28
+ end
@@ -0,0 +1,119 @@
1
+ module SwarmClusterCliOpe
2
+ module Kubernetes
3
+ module SyncConfigs
4
+ class Rsync < SwarmClusterCliOpe::SyncConfigs::Base
5
+
6
+ include BaseDecorator
7
+
8
+ # @return [String]
9
+ def local_folder
10
+ @configs[:configs][:local]
11
+ end
12
+
13
+ # @return [String]
14
+ def remote_folder
15
+ @configs[:configs][:remote]
16
+ end
17
+
18
+
19
+ # @return [SwarmClusterCliOpe::ShellCommandResponse]
20
+ def push
21
+ execute(direction: :up)
22
+ end
23
+
24
+ # @return [SwarmClusterCliOpe::ShellCommandResponse]
25
+ def pull
26
+ execute(direction: :down)
27
+ end
28
+
29
+
30
+ private
31
+
32
+ def execute(direction: :down)
33
+
34
+ if container.nil?
35
+ say "Container non trovato"
36
+ exit
37
+ end
38
+
39
+
40
+ if yes? "Attenzione, i dati locali o remoti verranno sovrascritti/cancellati?[y,yes]"
41
+
42
+ podname = container.name
43
+
44
+ if namespace.nil?
45
+ say "Mancata configurazione del namespace tramite argomento o .swarm_cluster_project"
46
+ exit
47
+ end
48
+
49
+ cmd = container.exec(['bash -c "apt update && apt install -yq rsync psmisc"'])
50
+ if cmd.execute.failed?
51
+ puts "Problemi nell'installazione di rsync nel pod"
52
+ else
53
+ cmd = container.cp_in(File.expand_path("../../rsync_cfgs/rsyncd.conf", __FILE__), "/tmp/.")
54
+ copy_1 = cmd.execute.failed?
55
+ cmd = container.cp_in(File.expand_path("../../rsync_cfgs/rsyncd.secrets", __FILE__), "/tmp/.")
56
+ copy_2 = cmd.execute.failed?
57
+ cmd = container.exec(['bash -c "chmod 600 /tmp/rsyncd.secrets && chown root /tmp/*"'])
58
+ chmod = cmd.execute.failed?
59
+ if copy_1 or copy_2 or chmod
60
+ puts "problema nella copia dei file di configurazione nel pod"
61
+ else
62
+
63
+
64
+ cmd = container.exec('bash -c "rsync --daemon --config=/tmp/rsyncd.conf --verbose --log-file=/tmp/rsync.log"')
65
+ if cmd.execute.failed?
66
+ say "Rsync non Inizializzato"
67
+ else
68
+ local_port = rand(30000..40000)
69
+
70
+ p = IO.popen(container.base_cmd("port-forward #{podname} #{local_port}:873").string_command)
71
+ pid = p.pid
72
+ say "PID in execuzione port forward:#{pid}"
73
+
74
+ sleep 1
75
+
76
+ # lanciamo il comando quindi per far rsync
77
+ rsync_command = [
78
+ "rsync -az --no-o --no-g",
79
+ "--delete",
80
+ "--password-file=#{ File.expand_path("../../rsync_cfgs/password", __FILE__)}"
81
+ ]
82
+
83
+ if direction == :up
84
+ rsync_command << "#{local_folder}/."
85
+ rsync_command << "rsync://root@0.0.0.0:#{local_port}/archives#{remote_folder}"
86
+ else
87
+ rsync_command << "rsync://root@0.0.0.0:#{local_port}/archives#{remote_folder}/."
88
+ rsync_command << local_folder
89
+ end
90
+ say "Eseguo rsync #{rsync_command.join(" ")}"
91
+
92
+
93
+ cmd = ShellCommandExecution.new(rsync_command)
94
+ cmd.execute
95
+
96
+ sleep 1
97
+ Process.kill("INT", pid)
98
+
99
+
100
+ say "Eseguo pulizia"
101
+ cmd = container.exec('bash -c "killall rsync"')
102
+ cmd.execute
103
+ cmd = container.exec('bash -c "rm -fr /tmp/rsyncd*"')
104
+ cmd.execute
105
+
106
+ end
107
+
108
+ end
109
+
110
+ end
111
+
112
+ end
113
+
114
+ end
115
+
116
+ end
117
+ end
118
+ end
119
+ end
@@ -0,0 +1,9 @@
1
+ module SwarmClusterCliOpe
2
+ module Kubernetes
3
+ module SyncConfigs
4
+ class Sqlite3 < SwarmClusterCliOpe::SyncConfigs::Sqlite3
5
+ include BaseDecorator
6
+ end
7
+ end
8
+ end
9
+ end
@@ -38,7 +38,7 @@ module SwarmClusterCliOpe
38
38
  pid: nil,
39
39
  status: nil
40
40
  }
41
- logger.debug { "SHELL: #{string_command}" }
41
+ logger.info { "SHELL: #{string_command}" }
42
42
  result[:status] = Open4::popen4(string_command) do |pid, stdin, stdout, stderr|
43
43
  stdin.close
44
44
 
@@ -30,7 +30,14 @@ module SwarmClusterCliOpe
30
30
  # @param [Object] object_class
31
31
  # @return [Array<object_class>]
32
32
  def result(object_class: OpenStruct)
33
- raw_result[:stdout].split("\n").collect { |s| object_class.new(JSON.parse(s)) }
33
+ #tento prima di estrapolare direttamente da json e successivamente come array
34
+ begin
35
+ # questo per k8s, dato che abbiamo come risposta un json vero
36
+ object_class.new(JSON.parse( raw_result[:stdout]))
37
+ rescue
38
+ # questo nel caso siamo in swarm che ci ritorna un array di json
39
+ raw_result[:stdout].split("\n").collect { |s| object_class.new(JSON.parse(s)) }
40
+ end
34
41
  end
35
42
 
36
43
  #
@@ -0,0 +1,128 @@
1
+ require 'active_support/concern'
2
+
3
+ module SwarmClusterCliOpe
4
+ module StackSyncConcern
5
+ extend ActiveSupport::Concern
6
+
7
+ included do
8
+
9
+ desc "stacksync [DIRECTION:pull|push]", "Si occupa di scaricare|caricare,utilizzando le configurazioni presenti, i dati dallo stack remoto"
10
+ long_desc <<-LONGDESC.gsub("\n", "\x5")
11
+ le configurazioni sono contenute nell'array: sync_configs.
12
+ ogni configurazione è composta da:
13
+ {
14
+ service:""
15
+ how:""
16
+ configs:{ }
17
+ }
18
+ - service è il nome del servizio (o nel caso di k8s una stringa da utilizzare come selettore labels)
19
+ - how è il come sincronizzare, definendo la tipologia:
20
+ ---- pg -> DB TODO
21
+ ---- mysql -> DB dump con mysql
22
+ ---- sqlite3 -> DB: viene eseguita una copia del file
23
+ ---- rsync -> RSYNC
24
+ - configs: è un hash con le configurazioni per ogni tipo di sincronizzazione
25
+
26
+ Possibili CFGS per tipologia:
27
+ rsync:
28
+ --local: -> path cartella locale
29
+ --remote: -> path cartella remota (contesto del container)
30
+
31
+ sqlite3:
32
+ --local: -> path al file
33
+ --remote: -> path al file remoto (contesto del container)
34
+
35
+ mysql:
36
+ --local: -> hash di configurazioni per il DB locale
37
+ - service: "db" -> nome del servizio nel compose locale, DEFAULT: quello definito sopra
38
+ - mysql_password_env: "MYSQL_PASSWORD" -> variabile ambiente interna al servizio contenente PASSWORD, DEFAULT: MYSQL_PASSWORD
39
+ - mysql_password: "root" -> valore in chiaro, in sostituzione della variabile ambiente, DEFAULT: root
40
+ - mysql_user_env: "MYSQL_USER" -> variabile ambiente interna al servizio contenente USERNAME, DEFAULT: MYSQL_USER
41
+ - mysql_user: "root" -> valore in chiaro, in sostituzione della variabile ambiente, DEFAULT: root
42
+ - database_name_env: "MYSQL_DATABASE" -> variabile ambiente interna al servizio contenente NOME DB, DEFAULT: MYSQL_DATABASE
43
+ - database_name: "nome_db" -> valore in chiaro, in sostituzione della variabile ambiente
44
+ --remote: -> hash di configurazioni per il DB remoto
45
+ - service: "db" -> nome del servizio nel compose locale, DEFAULT: quello definito sopra
46
+ - mysql_password_env: "MYSQL_PASSWORD" -> variabile ambiente interna al servizio contenente PASSWORD, DEFAULT: MYSQL_PASSWORD
47
+ - mysql_password: "root" -> valore in chiaro, in sostituzione della variabile ambiente, DEFAULT: root
48
+ - mysql_user_env: "MYSQL_USER" -> variabile ambiente interna al servizio contenente USERNAME, DEFAULT: MYSQL_USER
49
+ - mysql_user: "root" -> valore in chiaro, in sostituzione della variabile ambiente, DEFAULT: root
50
+ - database_name_env: "MYSQL_DATABASE" -> variabile ambiente interna al servizio contenente NOME DB, DEFAULT: MYSQL_DATABASE
51
+ - database_name: "MYSQL_DATABASE" -> valore in chiaro, in sostituzione della variabile ambiente
52
+ pg:
53
+ --local: -> hash di configurazioni per il DB locale
54
+ - service: "db" -> nome del servizio nel compose locale, DEFAULT: quello definito sopra
55
+ - pg_password_env: "POSTGRES_USER" -> variabile ambiente interna al servizio contenente PASSWORD, DEFAULT: POSTGRES_PASSWORD
56
+ - pg_password: "" -> valore in chiaro, in sostituzione della variabile ambiente
57
+ - pg_user_env: "POSTGRES_USER" -> variabile ambiente interna al servizio contenente USERNAME, DEFAULT: POSTGRES_USER
58
+ - pg_user: "postgres" -> valore in chiaro, in sostituzione della variabile ambiente, DEFAULT: postgres
59
+ - database_name_env: "POSTGRES_DB" -> variabile ambiente interna al servizio contenente NOME DB, DEFAULT: POSTGRES_DB
60
+ - database_name: "nome_db" -> valore in chiaro, in sostituzione della variabile ambiente
61
+ --remote: -> hash di configurazioni per il DB remoto
62
+ - service: "db" -> nome del servizio nel compose locale, DEFAULT: quello definito sopra
63
+ - pg_password_env: "POSTGRES_USER" -> variabile ambiente interna al servizio contenente PASSWORD, DEFAULT: POSTGRES_PASSWORD
64
+ - pg_password: "" -> valore in chiaro, in sostituzione della variabile ambiente
65
+ - pg_user_env: "POSTGRES_USER" -> variabile ambiente interna al servizio contenente USERNAME, DEFAULT: POSTGRES_USER
66
+ - pg_user: "postgres" -> valore in chiaro, in sostituzione della variabile ambiente, DEFAULT: postgres
67
+ - database_name_env: "POSTGRES_DB" -> variabile ambiente interna al servizio contenente NOME DB, DEFAULT: POSTGRES_DB
68
+ - database_name: "nome_db" -> valore in chiaro, in sostituzione della variabile ambiente
69
+
70
+
71
+ EXAMPLE:
72
+ Esempio di sincronizzazione di un file sqlite3 e una cartella
73
+ {
74
+ "stack_name": "test1",
75
+ "sync_configs": [
76
+ {
77
+ "service": "second",
78
+ "how": "rsync",
79
+ "configs": {
80
+ "remote": "/test_bind",
81
+ "local": "./uploads"
82
+ }
83
+ },
84
+ {
85
+ "service": "test_sqlite3",
86
+ "how": "sqlite3",
87
+ "configs": {
88
+ "remote": "/cartella_sqlite3/esempio.sqlite3",
89
+ "local": "./development.sqlite3"
90
+ }
91
+ }
92
+ ]
93
+ }
94
+ LONGDESC
95
+
96
+ def stacksync(direction)
97
+ direction = case direction
98
+ when 'push'
99
+ :push
100
+ when 'pull'
101
+ :pull
102
+ else
103
+ raise "ONLY [push|pull] action accepted"
104
+ end
105
+ cfgs.env(options[:environment]) do |cfgs|
106
+ sync_cfgs = cfgs.sync_configurations
107
+ if sync_cfgs.empty?
108
+ say "Attenzione, configurazioni di sincronizzazione vuoto. Leggere la documentazione"
109
+ else
110
+ sync_cfgs.each do |sync|
111
+ say "----------->>>>>>"
112
+ say "[ #{sync.class.name} ]"
113
+ sync.send(direction)
114
+ say "COMPLETE"
115
+ say "<<<<<<-----------"
116
+ end
117
+ end
118
+ end
119
+ end
120
+
121
+
122
+ end
123
+
124
+ # module ClassMethods
125
+
126
+ # end
127
+ end
128
+ end
@@ -9,7 +9,7 @@ module SwarmClusterCliOpe
9
9
  attr_accessor :configs
10
10
 
11
11
  # @param [Hash] configs
12
- # @param [Continuation] stack_cfgs
12
+ # @param [Configuration] stack_cfgs
13
13
  def initialize(stack_cfgs, configs)
14
14
  super()
15
15
  @configs = configs
@@ -4,12 +4,12 @@ module SwarmClusterCliOpe
4
4
 
5
5
  # @return [SwarmClusterCliOpe::SyncConfigs::EnvConfigs]
6
6
  def remote
7
- self.class::EnvConfigs.new(self, @configs[:configs][:remote] || {}, -> { container })
7
+ self.class::EnvConfigs.new(self, @configs.dig(:configs, :remote) || {}, -> { container })
8
8
  end
9
9
 
10
10
  # @return [SwarmClusterCliOpe::SyncConfigs::EnvConfigs]
11
11
  def local
12
- self.class::EnvConfigs.new(self, @configs[:configs][:local] || {}, -> { local_container })
12
+ self.class::EnvConfigs.new(self, @configs.dig(:configs, :local) || {}, -> { local_container })
13
13
  end
14
14
 
15
15
  ##
@@ -28,11 +28,13 @@ module SwarmClusterCliOpe
28
28
  database_name: #{local.database_name}
29
29
  username: #{local.username}
30
30
  password: #{local.password}
31
+ version: #{local.database_version}
31
32
  remote:
32
33
  service_name: #{remote.service_name}
33
34
  database_name: #{remote.database_name}
34
35
  username: #{remote.username}
35
- password: #{remote.password}"
36
+ password: #{remote.password}
37
+ version: #{remote.database_version}"
36
38
 
37
39
  end
38
40
 
@@ -34,6 +34,8 @@ module SwarmClusterCliOpe
34
34
  define_cfgs :username, default_env: "MYSQL_USER", configuration_name: :mysql_user, default_value: 'root'
35
35
  define_cfgs :password, default_env: "MYSQL_PASSWORD", configuration_name: :mysql_password, default_value: 'root'
36
36
 
37
+ define_cfgs :database_version, default_env: "MYSQL_MAJOR", configuration_name: :mysql_version
38
+
37
39
  end
38
40
 
39
41
 
@@ -2,28 +2,27 @@ module SwarmClusterCliOpe
2
2
  module SyncConfigs
3
3
  class PostGres < BaseDatabase
4
4
 
5
- # @return [TrueClass, FalseClass]
5
+
6
6
  def pull
7
7
  resume('pull')
8
8
 
9
- dump_cmd = dump_cmd(remote.username, remote.password, remote.database_name)
10
- logger.info{ "DUMP COMMAND: #{dump_cmd.join(' ')}"}
11
9
  if yes?("Confermare il comando?[y,yes]")
10
+
12
11
  tmp_file = "/tmp/#{Time.now.to_i}.sql.gz"
13
- container.exec("bash -c '#{dump_cmd.join(' ')}' > #{tmp_file}")
14
- local_container.copy_in(tmp_file, tmp_file)
12
+ dump_cmd(remote, tmp_file)
13
+ local.container.copy_in(tmp_file, tmp_file)
15
14
 
16
15
  # drop old db and recreate
17
- drop_cmd = drop_cmd(local.username, local.password, local.database_name)
18
- logger.info{ "DROP COMMAND: #{drop_cmd.join(' ')}"}
19
- local_container.exec("bash -c '#{drop_cmd.join(' ')}'")
20
- create_cmd = create_cmd(local.username, local.password, local.database_name)
21
- logger.info{ "CREATE COMMAND: #{create_cmd.join(' ')}"}
22
- local_container.exec("bash -c '#{create_cmd.join(' ')}'")
23
-
24
- restore_cmd = restore_cmd(local.username, local.password, local.database_name, tmp_file)
25
- logger.info{ "RESTORE COMMAND: #{restore_cmd.join(' ')}"}
26
- local_container.exec("bash -c '#{restore_cmd.join(' ')}'")
16
+ if Gem::Version.new(local.database_version) <= Gem::Version.new("12")
17
+ close_connections_and_drop_cmd(local)
18
+ else
19
+ raise "DA ANALIZZARE QUANDO LA 13 disponibile....dropdb ha un force come parametro"
20
+ end
21
+
22
+ create_cmd(local)
23
+
24
+ restore_cmd(local, tmp_file)
25
+
27
26
  end
28
27
  true
29
28
  end
@@ -32,25 +31,22 @@ module SwarmClusterCliOpe
32
31
  def push
33
32
  resume('PUSH')
34
33
 
35
- dump_cmd = dump_cmd(local.username, local.password, local.database_name)
36
- say "DUMP COMMAND: #{dump_cmd.join(' ')}"
37
34
  if yes?("ATTENZIONE !!!!!!PUSH!!!!! - Confermare il comando?[y,yes]")
35
+
38
36
  tmp_file = "/tmp/#{Time.now.to_i}.sql.gz"
39
- local_container.exec("bash -c '#{dump_cmd.join(' ')}' > #{tmp_file}")
40
- container.copy_in(tmp_file, tmp_file)
37
+ dump_cmd(local, tmp_file)
38
+ remote.container.copy_in(tmp_file, tmp_file)
41
39
 
42
40
  # drop old db and recreate
43
- drop_cmd = drop_cmd(remote.username, remote.password, remote.database_name)
44
- logger.info{ "DROP COMMAND: #{drop_cmd.join(' ')}"}
45
- container.exec("bash -c '#{drop_cmd.join(' ')}'")
41
+ if Gem::Version.new(local.database_version) <= Gem::Version.new("12")
42
+ close_connections_and_drop_cmd(remote)
43
+ else
44
+ raise "DA ANALIZZARE QUANDO LA 13 disponibile....dropdb ha un force come parametro"
45
+ end
46
+ create_cmd(remote)
46
47
 
47
- create_cmd = create_cmd(remote.username, remote.password, remote.database_name)
48
- logger.info{ "CREATE COMMAND: #{create_cmd.join(' ')}"}
49
- container.exec("bash -c '#{create_cmd.join(' ')}'")
48
+ restore_cmd(remote, tmp_file)
50
49
 
51
- restore_cmd = restore_cmd(remote.username, remote.password, remote.database_name, tmp_file)
52
- say "RESTORE COMMAND: #{restore_cmd.join(' ')}"
53
- container.exec("bash -c '#{restore_cmd.join(' ')}'")
54
50
  end
55
51
  true
56
52
  end
@@ -63,53 +59,101 @@ module SwarmClusterCliOpe
63
59
  define_cfgs :username, default_env: "POSTGRES_USER", configuration_name: :pg_user, default_value: 'postgres'
64
60
  define_cfgs :password, default_env: "POSTGRES_PASSWORD", configuration_name: :pg_password
65
61
 
62
+ define_cfgs :database_version, default_env: "PG_MAJOR", configuration_name: :pg_version
63
+
66
64
  end
67
65
 
68
66
  private
69
67
 
70
- def create_cmd(username, password, database_name)
68
+ # @param [EnvConfigs] config
69
+ def create_cmd(config)
71
70
  create_cmd = []
72
- create_cmd << "PGPASSWORD=\"#{password}\""
71
+ create_cmd << "PGPASSWORD=\"#{config.password}\""
73
72
  create_cmd << 'createdb'
74
- create_cmd << "--username=#{username}"
75
- create_cmd << database_name
76
- end
73
+ create_cmd << "--username=#{config.username}"
74
+ create_cmd << config.database_name
77
75
 
78
- def drop_cmd(username, password, database_name)
79
- drop_cmd = []
80
- drop_cmd << "PGPASSWORD=\"#{password}\""
81
- drop_cmd << 'dropdb'
82
- drop_cmd << "--username=#{username}"
83
- drop_cmd << database_name
84
- drop_cmd
76
+ logger.info { "CREATE COMMAND: #{create_cmd.join(' ')}" }
77
+ config.container.exec("bash -c '#{create_cmd.join(' ')} || true'")
85
78
  end
86
79
 
87
- def restore_cmd(username, password, database_name, tmp_file)
80
+ # @param [EnvConfigs] config
81
+ # def drop_cmd(config)
82
+ # drop_cmd = []
83
+ # drop_cmd << "PGPASSWORD=\"#{config.password}\""
84
+ # drop_cmd << 'dropdb'
85
+ # drop_cmd << '--if-exists'
86
+ # drop_cmd << "--username=#{config.username}"
87
+ # drop_cmd << config.database_name
88
+ # drop_cmd
89
+ #
90
+ # logger.info { "DROP COMMAND: #{drop_cmd.join(' ')}" }
91
+ # config.container.exec("bash -c '#{drop_cmd.join(' ')}'")
92
+ # end
93
+
94
+ # @param [EnvConfigs] config
95
+ def restore_cmd(config, tmp_file)
88
96
  restore_cmd = []
89
- restore_cmd << "PGPASSWORD=\"#{password}\""
97
+ restore_cmd << "PGPASSWORD=\"#{config.password}\""
90
98
  restore_cmd << 'pg_restore'
91
99
  restore_cmd << '--no-acl'
92
100
  restore_cmd << '--no-owner'
93
- restore_cmd << "--username=#{username}"
94
- restore_cmd << "--dbname=#{database_name}"
101
+ restore_cmd << "--username=#{config.username}"
102
+ restore_cmd << "--dbname=#{config.database_name}"
95
103
  restore_cmd << tmp_file
96
104
  restore_cmd
105
+
106
+ logger.info { "RESTORE COMMAND: #{restore_cmd.join(' ')}" }
107
+ config.container.exec("bash -c '#{restore_cmd.join(' ')}'")
97
108
  end
98
109
 
99
- def dump_cmd(username, password, database_name)
110
+ # @param [EnvConfigs] config
111
+ def dump_cmd(config, file)
100
112
  dump_cmd = []
101
- dump_cmd << "PGPASSWORD=\"#{password}\""
113
+ dump_cmd << "PGPASSWORD=\"#{config.password}\""
102
114
  dump_cmd << 'pg_dump'
103
115
  dump_cmd << '--no-acl'
104
116
  dump_cmd << '--no-owner'
105
- dump_cmd << "--username=#{username}"
117
+ dump_cmd << "--username=#{config.username}"
106
118
  dump_cmd << '--format=custom'
107
119
  dump_cmd << '--compress=9'
108
- dump_cmd << database_name
120
+ dump_cmd << config.database_name
109
121
  dump_cmd
122
+
123
+ logger.info { "DUMP COMMAND: #{dump_cmd.join(' ')}" }
124
+ config.container.exec("bash -c '#{dump_cmd.join(' ')}' > #{file}")
125
+
110
126
  end
111
127
 
112
128
 
129
+ # @param [EnvConfigs] config
130
+ def close_connections_and_drop_cmd(config)
131
+ cmd = []
132
+ cmd << "PGPASSWORD=\"#{config.password}\""
133
+
134
+ sql = []
135
+ sql << "SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = '\"'\"'#{config.database_name}'\"'\"' AND pid <> pg_backend_pid();;"
136
+ sql << "DROP DATABASE IF EXISTS #{config.database_name};"
137
+
138
+ cmd << "echo \"#{sql.join(" ")}\" "
139
+ cmd << '|'
140
+ cmd << 'psql'
141
+ cmd << "-U #{config.username}"
142
+ cmd << "postgres"
143
+ cmd
144
+
145
+ logger.info { "CLOSE CONNECTIONS COMMAND: #{cmd.join(' ')}" }
146
+ config.container.exec("bash -c '#{cmd.join(' ')}'")
147
+ end
148
+
149
+ # quello che fa capistrano quando copia in locale - utenze inventate
150
+ # gzip -d cortobio_production_new_2020-09-10-171742.sql.gz &&
151
+ # PGPASSWORD='root' psql -c "SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = 'development' AND pid <> pg_backend_pid();;" -U root -h 0.0.0.0 -p 32790 development;
152
+ # PGPASSWORD='root' dropdb -U root -h 0.0.0.0 -p 32790 development;
153
+ # PGPASSWORD='root' createdb -U root -h 0.0.0.0 -p 32790 development;
154
+ # PGPASSWORD='root' psql -U root -h 0.0.0.0 -p 32790 -d development < ./cortobio_production_new_2020-09-10-171742.sql
155
+
156
+
113
157
  end
114
158
  end
115
159
  end