seira 0.1.7 → 0.2.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: ff2eb0fe8f04aa7734da72c7f0542c7d5b556b84
-  data.tar.gz: 662cee51d65fc44ad8010b4dd3dfdc29b644de6b
+  metadata.gz: 47534b5b777df0da7544ab1c9cbc1b426b336cbc
+  data.tar.gz: b562613300bc8f11a2639885a2cd957f1b97bd26
 SHA512:
-  metadata.gz: dc8e4d479761c4c4f3e09b2b56d0d2e70aca3295fceab18316f56751030b0cee4291a26c5dc9eeec00fcf1d2d1abdba5f47e945c06fdd440e3f861f069eacd14
-  data.tar.gz: 47e734d06b5456df2f54497fb9efe7f02f44f02c7f54de12dc7ce2e8287c69a512cda8f3c96d3c68ea92fd4042149736292a650faafec4bb5c45ededd4ab7995
+  metadata.gz: 3940d87ebf4736b7af53dfa609817757eae92d8c3b8b5284cf5437ea67d46dfc521e4e7911ed8613c3b3f2ac1a9d21da7c8ee27563b5af928391671ecba3ba54
+  data.tar.gz: ca321c84dbe5b482adaa486b1d9b3ec9ef73b03911c2714601b3ce61440dcb2f4fb1fd5fa107ca523a7631c066c159cc46ec06bf055d6b3a2dbf5f7b683c95c9
data/README.md CHANGED
@@ -70,9 +70,11 @@ seira:
   cluster: gke_org-production_us-central1-a_production
   aliases:
     - "p"
-  valid_apps:
-    - app1
-    - app2
+  applications:
+    - name: app1
+      golden_tier: "web"
+    - name: app2
+      golden_tier: "web"
 ```
 
 This specification is read in and used to determine which `gcloud` context and which `kubectl` cluster to use when running commands. For example, `seira internal` will connect to the `org-internal` gcloud configuration and the `gke_org-internal_us-central1-a_internal` kubectl cluster. As a shorthand, `seira i` is specified as an alias.
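
To make the alias behavior concrete, here is a rough sketch of equivalent invocations; the app name `app1` comes from the example config above, and `jobs list` is one of the actions added in this release:

```
# Both commands target the org-internal gcloud configuration and its kubectl cluster;
# "i" is resolved through the aliases list in the config file.
seira internal app1 jobs list
seira i app1 jobs list
```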
data/lib/helpers.rb ADDED
@@ -0,0 +1,13 @@
+module Seira
+  class Helpers
+    class << self
+      def rails_env(context:)
+        if context[:cluster] == 'internal'
+          'production'
+        else
+          context[:cluster]
+        end
+      end
+    end
+  end
+end
data/lib/seira.rb CHANGED
@@ -1,12 +1,14 @@
 require 'json'
 require 'highline/import'
 require 'colorize'
+require 'tmpdir'
 
 require "seira/version"
 require 'seira/app'
 require 'seira/cluster'
 require 'seira/memcached'
 require 'seira/pods'
+require 'seira/jobs'
 require 'seira/proxy'
 require 'seira/random'
 require 'seira/db'
@@ -22,6 +24,7 @@ module Seira
   CATEGORIES = {
     'secrets' => Seira::Secrets,
     'pods' => Seira::Pods,
+    'jobs' => Seira::Jobs,
     'db' => Seira::Db,
     'redis' => Seira::Redis,
     'memcached' => Seira::Memcached,
@@ -104,6 +107,7 @@ module Seira
     {
       cluster: cluster,
       project: project,
+      settings: settings,
       default_zone: settings.default_zone
     }
   end
@@ -118,7 +122,7 @@ module Seira
   def perform_action_validation(klass:, action:)
     return true if simple_cluster_change?
 
-    unless klass == Seira::Cluster || settings.valid_apps.include?(app)
+    unless klass == Seira::Cluster || settings.applications.include?(app)
       puts "Invalid app name specified"
       exit(1)
     end
data/lib/seira/app.rb CHANGED
@@ -49,6 +49,13 @@ module Seira
       run_apply(restart: true)
     end
 
+    def ask_cluster_for_current_revision
+      tier = context[:settings].config_for_app(app)['golden_tier'] || 'web'
+      current_image = `kubectl get deployment --namespace=#{app} -l app=#{app},tier=#{tier} -o=jsonpath='{$.items[:1].spec.template.spec.containers[:1].image}'`.strip.chomp
+      current_revision = current_image.split(':').last
+      current_revision
+    end
+
     private
 
     def run_bootstrap
@@ -64,39 +71,40 @@ module Seira
 
     # Kube vanilla based upgrade
     def run_apply(restart: false)
-      destination = "tmp/#{context[:cluster]}/#{app}"
-      revision = ENV['REVISION']
-
-      if revision.nil?
-        current_image = `kubectl get deployment --namespace=#{app} -l app=#{app},tier=web -o=jsonpath='{$.items[:1].spec.template.spec.containers[:1].image}'`.strip.chomp
-        current_revision = current_image.split(':').last
-        exit(1) unless HighLine.agree("No REVISION specified. Use current deployment revision '#{current_revision}'?")
-        revision = current_revision
-      end
+      Dir.mktmpdir do |dir|
+        destination = "#{dir}/#{context[:cluster]}/#{app}"
+        revision = ENV['REVISION']
+
+        if revision.nil?
+          current_revision = ask_cluster_for_current_revision
+          exit(1) unless HighLine.agree("No REVISION specified. Use current deployment revision '#{current_revision}'?")
+          revision = current_revision
+        end
 
-      replacement_hash = {
-        'REVISION' => revision,
-        'RESTARTED_AT_VALUE' => "Initial Deploy for #{revision}"
-      }
+        replacement_hash = {
+          'REVISION' => revision,
+          'RESTARTED_AT_VALUE' => "Initial Deploy for #{revision}"
+        }
 
-      if restart
-        replacement_hash['RESTARTED_AT_VALUE'] = Time.now.to_s
-      end
+        if restart
+          replacement_hash['RESTARTED_AT_VALUE'] = Time.now.to_s
+        end
 
-      replacement_hash.each do |k, v|
-        next unless v.nil? || v == ''
-        puts "Found nil or blank value for replacement hash key #{k}. Aborting!"
-        exit(1)
-      end
+        replacement_hash.each do |k, v|
+          next unless v.nil? || v == ''
+          puts "Found nil or blank value for replacement hash key #{k}. Aborting!"
+          exit(1)
+        end
 
-      find_and_replace_revision(
-        source: "kubernetes/#{context[:cluster]}/#{app}",
-        destination: destination,
-        replacement_hash: replacement_hash
-      )
+        find_and_replace_revision(
+          source: "kubernetes/#{context[:cluster]}/#{app}",
+          destination: destination,
+          replacement_hash: replacement_hash
+        )
 
-      puts "Running 'kubectl apply -f #{destination}'"
-      system("kubectl apply -f #{destination}")
+        puts "Running 'kubectl apply -f #{destination}'"
+        system("kubectl apply -f #{destination}")
+      end
     end
 
     def run_scale
@@ -128,12 +136,7 @@ module Seira
       main_secret_name = Seira::Secrets.new(app: app, action: action, args: args, context: context).main_secret_name
 
       # 'internal' is a unique cluster/project "cluster". It always means production in terms of rails app.
-      rails_env =
-        if context[:cluster] == 'internal'
-          'production'
-        else
-          context[:cluster]
-        end
+      rails_env = Helpers.rails_env(context: context)
 
       puts `kubectl create secret generic #{main_secret_name} --namespace #{app} --from-literal=RAILS_ENV=#{rails_env} --from-literal=RACK_ENV=#{rails_env}`
     end
@@ -153,16 +156,26 @@ module Seira
     end
 
     def find_and_replace_revision(source:, destination:, replacement_hash:)
-      puts "Copying source yaml from #{source} to #{destination}"
+      puts "Copying source yaml from #{source} to temp folder"
       FileUtils.mkdir_p destination # Create the nested directory
       FileUtils.rm_rf("#{destination}/.", secure: true) # Clean out old files from the tmp folder
       FileUtils.copy_entry source, destination
+      # Anything in the jobs directory is not intended to be applied when deploying
+      # the app, but rather run when needed as Job objects. Force to avoid exception if DNE.
+      FileUtils.rm_rf("#{destination}/jobs/") if File.directory?("#{destination}/jobs/")
 
       # Iterate through each yaml file and find/replace and save
-      puts "Iterating #{destination} files find/replace revision information"
+      puts "Iterating temp folder files find/replace revision information"
       Dir.foreach(destination) do |item|
         next if item == '.' || item == '..'
 
+        # If we have run into a directory item, skip it
+        next if File.directory?("#{destination}/#{item}")
+
+        # Skip any manifest file that has "seira-skip.yaml" at the end. Common use case is for Job definitions
+        # to be used in "seira staging <app> jobs run"
+        next if item.end_with?("seira-skip.yaml")
+
         text = File.read("#{destination}/#{item}")
 
         new_contents = text
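
For reference, the `ask_cluster_for_current_revision` helper added above shells out to `kubectl` roughly as follows; the namespace/app name `app1` and the image path in the comment are hypothetical:

```
# Reads the image of the first container of the first matching deployment;
# the revision is whatever follows the final colon in the image tag.
kubectl get deployment --namespace=app1 -l app=app1,tier=web \
  -o=jsonpath='{$.items[:1].spec.template.spec.containers[:1].image}'
# e.g. gcr.io/some-org/app1:abc123  ->  revision "abc123" (illustrative output)
```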
data/lib/seira/cluster.rb CHANGED
@@ -5,7 +5,7 @@ require 'fileutils'
 # Example usages:
 module Seira
   class Cluster
-    VALID_ACTIONS = %w[help bootstrap].freeze
+    VALID_ACTIONS = %w[help bootstrap upgrade].freeze
     SUMMARY = "For managing whole clusters.".freeze
 
     attr_reader :action, :args, :context, :settings
@@ -23,6 +23,8 @@ module Seira
         run_help
       when 'bootstrap'
         run_bootstrap
+      when 'upgrade'
+        run_upgrade
       else
         fail "Unknown command encountered"
       end
@@ -78,5 +80,129 @@ module Seira
       puts `kubectl create secret docker-registry gcr-secret --docker-username=_json_key --docker-password="$(cat #{dockercfg_location})" --docker-server=https://gcr.io --docker-email=doesnotmatter@example.com`
       puts `kubectl create secret generic cloudsql-credentials --namespace default --from-file=credentials.json=#{cloudsql_credentials_location}`
     end
+
+    def run_upgrade
+      cluster = context[:cluster]
+
+      # Take a single argument, which is the version to upgrade to
+      new_version = args[0]
+      if new_version.nil?
+        puts 'must specify version to upgrade to'
+        exit(1)
+      end
+
+      # Ensure the specified version is supported by GKE
+      server_config = JSON.parse(`gcloud container get-server-config --format json`)
+      valid_versions = server_config['validMasterVersions']
+      unless valid_versions.include? new_version
+        puts "Version #{new_version} is unsupported. Supported versions are:"
+        puts valid_versions
+        exit(1)
+      end
+
+      cluster_config = JSON.parse(`gcloud container clusters describe #{cluster} --format json`)
+
+      # Update the master node first
+      puts 'updating master (this may take a while)'
+      if cluster_config['currentMasterVersion'] == new_version
+        # Master has already been updated; this step is not needed
+        puts 'already up to date'
+      elsif system("gcloud container clusters upgrade #{cluster} --cluster-version=#{new_version} --master")
+        puts 'master updated successfully'
+      else
+        puts 'failed to update master'
+        exit(1)
+      end
+
+      # Figure out what our current node pool setup is. The goal here is to be able to re-run this
+      # command if it fails partway through, and have it pick up where it left off.
+      pools = JSON.parse(`gcloud container node-pools list --cluster #{cluster} --format json`)
+      if pools.length == 2
+        # We have two node pools. Assume this is due to the upgrade process already being started,
+        # so we have one pool with the old version and one pool with the new version.
+        old_pool = pools.find { |p| p['version'] != new_version }
+        new_pool = pools.find { |p| p['version'] == new_version }
+        if old_pool.nil? || new_pool.nil?
+          # Turns out the two pools are not the result of a partially-finished upgrade; in this
+          # case we give up and the upgrade will have to proceed manually.
+          puts 'Unsupported node pool setup: could not find old and new pool'
+          exit(1)
+        end
+      elsif pools.length == 1
+        # Only one pool is the normal case; set old_pool and that's it.
+        old_pool = pools.first
+      else
+        # If we have three or more or zero pools, upgrade will have to proceed manually.
+        puts 'Unsupported node pool setup: unexpected number of pools'
+        exit(1)
+      end
+      # Get names of the nodes in the old node pool
+      old_nodes = `kubectl get nodes -l cloud.google.com/gke-nodepool=#{old_pool['name']} -o name`.split("\n")
+
+      # If we don't already have a new pool (i.e. one with the new version), create one
+      if new_pool.nil?
+        # Pick a name for the new pool, alternating between blue and green
+        new_pool_name = old_pool['name'] == 'blue' ? 'green' : 'blue'
+
+        # Create a new node pool with all the same settings as the old one. The version of the new
+        # pool will match the master version, which has already been updated.
+        puts 'creating new node pool'
+        command =
+          "gcloud container node-pools create #{new_pool_name} \
+            --cluster=#{cluster} \
+            --disk-size=#{old_pool['config']['diskSizeGb']} \
+            --image-type=#{old_pool['config']['imageType']} \
+            --machine-type=#{old_pool['config']['machineType']} \
+            --num-nodes=#{old_nodes.count} \
+            --service-account=#{old_pool['serviceAccount']}"
+        # TODO: support autoscaling if old pool has it turned on
+        if system(command)
+          puts 'new pool created successfully'
+        else
+          puts 'failed to create new pool'
+          exit(1)
+        end
+      end
+
+      # Cordon all the nodes in the old pool, preventing new workloads from being sent to them
+      puts 'cordoning old nodes'
+      old_nodes.each do |node|
+        unless system("kubectl cordon #{node}")
+          puts "failed to cordon node #{node}"
+          exit(1)
+        end
+      end
+
+      # Drain all the nodes in the old pool, moving workloads off of them gradually while
+      # respecting maxUnavailable etc.
+      puts 'draining old nodes'
+      old_nodes.each do |node|
+        # --force deletes pods that aren't managed by a ReplicationController, Job, or DaemonSet,
+        # which shouldn't be any besides manually created temp pods
+        # --ignore-daemonsets prevents failing due to presence of DaemonSets, which cannot be moved
+        # because they're tied to a specific node
+        # --delete-local-data prevents failing due to presence of local data, which cannot be moved
+        # but is bad practice to use for anything that can't be lost
+        puts "draining #{node}"
+        unless system("kubectl drain --force --ignore-daemonsets --delete-local-data #{node}")
+          puts "failed to drain node #{node}"
+          exit(1)
+        end
+      end
+
+      # All workloads which can be moved have been moved off of the old node pool, so
+      # that node pool can be deleted, leaving only the new pool with the new version
+      if HighLine.agree('Delete old node pool?')
+        puts 'deleting old node pool'
+        if system("gcloud container node-pools delete #{old_pool['name']} --cluster #{cluster}")
+          puts 'old pool deleted successfully'
+        else
+          puts 'failed to delete old pool'
+          exit(1)
+        end
+      end
+
+      puts 'upgrade complete!'
+    end
   end
 end
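
The new `upgrade` action orchestrates a blue/green node-pool swap. A condensed sketch of the underlying commands it runs is shown below; the cluster name `production`, the pool names, the node count, and the GKE version are placeholder values:

```
# 1. Upgrade the master to the requested version
gcloud container clusters upgrade production --cluster-version=1.8.7-gke.0 --master
# 2. Create a replacement node pool (settings copied from the old pool)
gcloud container node-pools create green --cluster=production --num-nodes=3 ...
# 3. Cordon and drain every node in the old pool
kubectl cordon <old-node>
kubectl drain --force --ignore-daemonsets --delete-local-data <old-node>
# 4. After confirmation, delete the old pool
gcloud container node-pools delete blue --cluster production
```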
data/lib/seira/db/create.rb CHANGED
@@ -61,6 +61,8 @@ module Seira
         @replica_for = arg.split('=')[1] # TODO: Read secret to get it automatically, but allow for fallback
       elsif arg.start_with? '--highly-available'
         @make_highly_available = true
+      elsif arg.start_with? '--database-name='
+        @database_name = arg.split('=')[1]
       elsif /^--[\w\-]+=.+$/.match? arg
         create_command += " #{arg}"
       else
@@ -195,6 +197,10 @@ module Seira
       name.gsub("handshake-", "")
     end
 
+    def default_database_name
+      "#{app}_#{Helpers.rails_env(context: context)}"
+    end
+
     def write_pgbouncer_yaml
       # TODO: Clean this up by moving into a proper templated yaml file
       pgbouncer_yaml = <<-FOO
@@ -244,7 +250,7 @@ spec:
         database: #{name}
     spec:
       containers:
-        - image: handshake/pgbouncer:0.1.2
+        - image: handshake/pgbouncer:0.2.0
           name: pgbouncer
           ports:
             - containerPort: 6432
@@ -254,9 +260,14 @@ spec:
                 name: #{pgbouncer_configs_name}
             - secretRef:
                 name: #{pgbouncer_secret_name}
+          env:
+            - name: "PGPORT"
+              value: "6432"
+            - name: "PGDATABASE"
+              value: "#{@database_name || default_database_name}"
           readinessProbe:
-            tcpSocket:
-              port: 6432
+            exec:
+              command: ["psql", "-c", "SELECT 1;"]
             initialDelaySeconds: 5
             periodSeconds: 10
           livenessProbe:
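
Assuming the usual `seira <cluster> <app> db create` invocation (the app name `app1` is hypothetical), the new flag and its default look roughly like this:

```
# Explicit database name for pgbouncer's PGDATABASE:
seira staging app1 db create --database-name=app1_custom
# Without the flag, PGDATABASE falls back to "<app>_<rails_env>", e.g. app1_staging.
seira staging app1 db create
```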
data/lib/seira/jobs.rb ADDED
@@ -0,0 +1,145 @@
+require 'json'
+
+module Seira
+  class Jobs
+    VALID_ACTIONS = %w[help list delete run].freeze
+    SUMMARY = "Manage your application's jobs.".freeze
+
+    attr_reader :app, :action, :args, :job_name, :context
+
+    def initialize(app:, action:, args:, context:)
+      @app = app
+      @action = action
+      @context = context
+      @args = args
+      @job_name = args[0]
+    end
+
+    def run
+      case action
+      when 'help'
+        run_help
+      when 'list'
+        run_list
+      when 'delete'
+        run_delete
+      when 'run'
+        run_run
+      else
+        fail "Unknown command encountered"
+      end
+    end
+
+    private
+
+    def run_help
+      puts SUMMARY
+      puts "\n\n"
+      puts "TODO"
+    end
+
+    def run_list
+      puts `kubectl get jobs --namespace=#{app} -o wide`
+    end
+
+    def run_delete
+      puts `kubectl delete job #{job_name} --namespace=#{app}`
+    end
+
+    def run_run
+      gcp_app = App.new(app: app, action: 'apply', args: [""], context: context)
+
+      # Set defaults
+      async = false # Wait for job to finish before continuing.
+      no_delete = false # Delete at end
+
+      # Loop through args and process any that aren't just the command to run
+      loop do
+        arg = args.first
+        if arg.nil?
+          puts 'Please specify a command to run'
+          exit(1)
+        end
+
+        break unless arg.start_with? '--'
+
+        if arg == '--async'
+          async = true
+        elsif arg == '--no-delete'
+          no_delete = true
+        else
+          puts "Warning: Unrecognized argument #{arg}"
+        end
+
+        args.shift
+      end
+
+      if async && !no_delete
+        puts "Cannot delete Job after running if Job is async, since we don't know when it finishes."
+        exit(1)
+      end
+
+      # TODO: Configurable CPU and memory by args such as large, small, xlarge.
+      command = args.join(' ')
+      unique_name = "#{app}-run-#{Random.unique_name}"
+      revision = gcp_app.ask_cluster_for_current_revision # TODO: Make more reliable, especially with no web tier
+      replacement_hash = {
+        'UNIQUE_NAME' => unique_name,
+        'REVISION' => revision,
+        'COMMAND' => command.split(' ').map { |part| "\"#{part}\"" }.join(", "),
+        'CPU_REQUEST' => '200m',
+        'CPU_LIMIT' => '500m',
+        'MEMORY_REQUEST' => '500Mi',
+        'MEMORY_LIMIT' => '1Gi',
+      }
+
+      source = "kubernetes/#{context[:cluster]}/#{app}" # TODO: Move to method in app.rb
+      Dir.mktmpdir do |destination|
+        revision = ENV['REVISION']
+        file_name = "template.yaml"
+
+        FileUtils.mkdir_p destination # Create the nested directory
+        FileUtils.copy_file "#{source}/jobs/#{file_name}", "#{destination}/#{file_name}"
+
+        # TODO: Move this into a method since it is copied from app.rb
+        text = File.read("#{destination}/#{file_name}")
+        new_contents = text
+        replacement_hash.each do |key, value|
+          new_contents.gsub!(key, value)
+        end
+        File.open("#{destination}/#{file_name}", 'w') { |file| file.write(new_contents) }
+
+        puts "Running 'kubectl apply -f #{destination}'"
+        system("kubectl apply -f #{destination}")
+      end
+
+      unless async
+        # Check job status until it's finished
+        print 'Waiting for job to complete...'
+        job_spec = nil
+        loop do
+          job_spec = JSON.parse(`kubectl --namespace=#{app} get job #{unique_name} -o json`)
+          break if !job_spec['status']['succeeded'].nil? || !job_spec['status']['failed'].nil?
+          print '.'
+          sleep 3
+        end
+
+        status =
+          if !job_spec['status']['succeeded'].nil?
+            "succeeded"
+          elsif !job_spec['status']['failed'].nil?
+            "failed"
+          else
+            "unknown"
+          end
+
+        if no_delete
+          puts "Job finished with status #{status}. Leaving Job object in cluster, clean up manually when confirmed."
+        else
+          print "Job finished with status #{status}. Deleting Job from cluster for cleanup."
+          system("kubectl delete job #{unique_name} -n #{app}")
+        end
+      end
+    end
+  end
+end
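
A few example invocations of the new `jobs` category; the app name and rake tasks are hypothetical, and the `seira staging <app> jobs run` form comes from the comment in app.rb above:

```
seira staging app1 jobs list                                    # kubectl get jobs in the app namespace
seira staging app1 jobs run rake db:migrate                     # waits for completion, then deletes the Job
seira staging app1 jobs run --no-delete rake db:migrate         # leaves the Job object for inspection
seira staging app1 jobs run --async --no-delete rake some:task  # fire and forget (async requires --no-delete)
seira staging app1 jobs delete app1-run-<unique-suffix>         # manual cleanup of a kept Job
```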
data/lib/seira/memcached.rb CHANGED
@@ -103,12 +103,12 @@ module Seira
         end
       end
 
-      file_name = write_config(values)
-      unique_name = Seira::Random.unique_name(existing_instances)
-      name = "#{app}-memcached-#{unique_name}"
-      puts `helm install --namespace #{app} --name #{name} --wait -f #{file_name} stable/memcached`
-
-      File.delete(file_name)
+      Dir.mktmpdir do |dir|
+        file_name = write_config(dir: dir, values: values)
+        unique_name = Seira::Random.unique_name(existing_instances)
+        name = "#{app}-memcached-#{unique_name}"
+        puts `helm install --namespace #{app} --name #{name} --wait -f #{file_name} stable/memcached`
+      end
 
       puts "To get status: 'seira #{context[:cluster]} #{app} memcached status #{unique_name}'"
       puts "Service URI for this memcached instance: 'memcached://#{name}-memcached:11211'."
@@ -131,8 +131,8 @@ module Seira
       end
     end
 
-    def write_config(values)
-      file_name = "tmp/temp-memcached-config-#{Seira::Cluster.current_cluster}-#{app}.json"
+    def write_config(dir:, values:)
+      file_name = "#{dir}/temp-memcached-config-#{Seira::Cluster.current_cluster}-#{app}.json"
       File.open(file_name, "wb") do |f|
         f.write(values.to_json)
       end
data/lib/seira/redis.rb CHANGED
@@ -122,12 +122,12 @@ module Seira
         end
       end
 
-      file_name = write_config(values)
-      unique_name = Seira::Random.unique_name(existing_instances)
-      name = "#{app}-redis-#{unique_name}"
-      puts `helm install --namespace #{app} --name #{name} --wait -f #{file_name} stable/redis`
-
-      File.delete(file_name)
+      Dir.mktmpdir do |dir|
+        file_name = write_config(dir: dir, values: values)
+        unique_name = Seira::Random.unique_name(existing_instances)
+        name = "#{app}-redis-#{unique_name}"
+        puts `helm install --namespace #{app} --name #{name} --wait -f #{file_name} stable/redis`
+      end
 
       puts "To get status: 'seira #{context[:cluster]} #{app} redis status #{unique_name}'"
       puts "To get credentials for storing in app secrets: 'seira #{context[:cluster]} #{app} redis credentials #{unique_name}'"
@@ -151,8 +151,8 @@ module Seira
       end
     end
 
-    def write_config(values)
-      file_name = "tmp/temp-redis-config-#{Seira::Cluster.current_cluster}-#{app}.json"
+    def write_config(dir:, values:)
+      file_name = "#{dir}/temp-redis-config-#{Seira::Cluster.current_cluster}-#{app}.json"
       File.open(file_name, "wb") do |f|
         f.write(values.to_json)
       end
data/lib/seira/secrets.rb CHANGED
@@ -138,22 +138,22 @@ module Seira
     # In the normal case the secret we are updating is just main_secret_name,
     # but in special cases we may be doing an operation on a different secret
     def write_secrets(secrets:, secret_name: main_secret_name)
-      file_name = "tmp/temp-secrets-#{Seira::Cluster.current_cluster}-#{secret_name}.json"
-      File.open(file_name, "wb") do |f|
-        f.write(secrets.to_json)
+      Dir.mktmpdir do |dir|
+        file_name = "#{dir}/temp-secrets-#{Seira::Cluster.current_cluster}-#{secret_name}.json"
+        File.open(file_name, "w") do |f|
+          f.write(secrets.to_json)
+        end
+
+        # The command we use depends on if it already exists or not
+        secret_exists = system("kubectl get secret #{secret_name} --namespace #{app} > /dev/null")
+        command = secret_exists ? "replace" : "create"
+
+        if system("kubectl #{command} --namespace #{app} -f #{file_name}")
+          puts "Successfully created/replaced #{secret_name} secret #{key} in cluster #{Seira::Cluster.current_cluster}"
+        else
+          puts "Failed to update secret"
+        end
       end
-
-      # The command we use depends on if it already exists or not
-      secret_exists = system("kubectl get secret #{secret_name} --namespace #{app} > /dev/null")
-      command = secret_exists ? "replace" : "create"
-
-      if system("kubectl #{command} --namespace #{app} -f #{file_name}")
-        puts "Successfully created/replaced #{secret_name} secret #{key} in cluster #{Seira::Cluster.current_cluster}"
-      else
-        puts "Failed to update secret"
-      end
-
-      File.delete(file_name)
     end
 
     # Returns the still-base64encoded secrets hashmap
data/lib/seira/settings.rb CHANGED
@@ -24,8 +24,12 @@ module Seira
       settings['seira']['default_zone']
     end
 
-    def valid_apps
-      settings['seira']['valid_apps']
+    def applications
+      settings['seira']['applications'].map { |app| app['name'] }
+    end
+
+    def config_for_app(app_name)
+      settings['seira']['applications'].find { |app| app['name'] == app_name }
     end
 
     def valid_cluster_names
data/lib/seira/version.rb CHANGED
@@ -1,3 +1,3 @@
 module Seira
-  VERSION = "0.1.7".freeze
+  VERSION = "0.2.0".freeze
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: seira
 version: !ruby/object:Gem::Version
-  version: 0.1.7
+  version: 0.2.0
 platform: ruby
 authors:
 - Scott Ringwelski
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-01-02 00:00:00.000000000 Z
+date: 2018-01-27 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: highline
@@ -116,11 +116,13 @@ files:
 - bin/console
 - bin/seira
 - bin/setup
+- lib/helpers.rb
 - lib/seira.rb
 - lib/seira/app.rb
 - lib/seira/cluster.rb
 - lib/seira/db.rb
 - lib/seira/db/create.rb
+- lib/seira/jobs.rb
 - lib/seira/memcached.rb
 - lib/seira/pods.rb
 - lib/seira/proxy.rb