seira 0.1.7 → 0.2.0
- checksums.yaml +4 -4
- data/README.md +5 -3
- data/lib/helpers.rb +13 -0
- data/lib/seira.rb +5 -1
- data/lib/seira/app.rb +49 -36
- data/lib/seira/cluster.rb +127 -1
- data/lib/seira/db/create.rb +14 -3
- data/lib/seira/jobs.rb +145 -0
- data/lib/seira/memcached.rb +8 -8
- data/lib/seira/redis.rb +8 -8
- data/lib/seira/secrets.rb +15 -15
- data/lib/seira/settings.rb +6 -2
- data/lib/seira/version.rb +1 -1
- metadata +4 -2

checksums.yaml CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 47534b5b777df0da7544ab1c9cbc1b426b336cbc
+  data.tar.gz: b562613300bc8f11a2639885a2cd957f1b97bd26
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 3940d87ebf4736b7af53dfa609817757eae92d8c3b8b5284cf5437ea67d46dfc521e4e7911ed8613c3b3f2ac1a9d21da7c8ee27563b5af928391671ecba3ba54
+  data.tar.gz: ca321c84dbe5b482adaa486b1d9b3ec9ef73b03911c2714601b3ce61440dcb2f4fb1fd5fa107ca523a7631c066c159cc46ec06bf055d6b3a2dbf5f7b683c95c9

data/README.md CHANGED

@@ -70,9 +70,11 @@ seira:
     cluster: gke_org-production_us-central1-a_production
     aliases:
       - "p"
-
-    - app1
-
+  applications:
+    - name: app1
+      golden_tier: "web"
+    - name: app2
+      golden_tier: "web"
 ```

 This specification is read in and used to determine what `gcloud` context to use and what `kubectl` cluster to use when operating commands. For example, `seira internal` will connect to `org-internal` gcloud configuration and `gke_org-internal_us-central1-a_internal` kubectl cluster. For shorthand, `seira i` shorthand is specified as an alias.

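The `applications` list replaces the previous flat list of app names; each entry now carries a `golden_tier` key. As the app.rb diff below shows, the new `ask_cluster_for_current_revision` reads `config_for_app(app)['golden_tier']` (falling back to `web`) to decide which deployment's image tag is treated as the app's current revision.
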
data/lib/helpers.rb ADDED

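The body of the new helpers.rb is not shown in this listing. Based on its call sites added in this release (`Helpers.rails_env(context: context)` in app.rb and db/create.rb) and the inline logic it replaces, a minimal sketch might look like the following; the exact module nesting and any other helpers in the file are assumptions:

# Sketch only -- the actual file contents are not shown in this diff.
module Seira
  module Helpers
    # The 'internal' cluster always runs the production Rails environment;
    # every other cluster name doubles as its RAILS_ENV.
    def self.rails_env(context:)
      if context[:cluster] == 'internal'
        'production'
      else
        context[:cluster]
      end
    end
  end
end
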
data/lib/seira.rb CHANGED

@@ -1,12 +1,14 @@
 require 'json'
 require 'highline/import'
 require 'colorize'
+require 'tmpdir'

 require "seira/version"
 require 'seira/app'
 require 'seira/cluster'
 require 'seira/memcached'
 require 'seira/pods'
+require 'seira/jobs'
 require 'seira/proxy'
 require 'seira/random'
 require 'seira/db'
@@ -22,6 +24,7 @@ module Seira
   CATEGORIES = {
     'secrets' => Seira::Secrets,
     'pods' => Seira::Pods,
+    'jobs' => Seira::Jobs,
     'db' => Seira::Db,
     'redis' => Seira::Redis,
     'memcached' => Seira::Memcached,
@@ -104,6 +107,7 @@ module Seira
     {
       cluster: cluster,
       project: project,
+      settings: settings,
       default_zone: settings.default_zone
     }
   end
@@ -118,7 +122,7 @@ module Seira
   def perform_action_validation(klass:, action:)
     return true if simple_cluster_change?

-    unless klass == Seira::Cluster || settings.
+    unless klass == Seira::Cluster || settings.applications.include?(app)
       puts "Invalid app name specified"
       exit(1)
     end

data/lib/seira/app.rb CHANGED

@@ -49,6 +49,13 @@ module Seira
       run_apply(restart: true)
     end

+    def ask_cluster_for_current_revision
+      tier = context[:settings].config_for_app(app)['golden_tier'] || 'web'
+      current_image = `kubectl get deployment --namespace=#{app} -l app=#{app},tier=#{tier} -o=jsonpath='{$.items[:1].spec.template.spec.containers[:1].image}'`.strip.chomp
+      current_revision = current_image.split(':').last
+      current_revision
+    end
+
     private

     def run_bootstrap
@@ -64,39 +71,40 @@ module Seira

     # Kube vanilla based upgrade
     def run_apply(restart: false)
-
-
-
-
-
-
-
-
-
+      Dir.mktmpdir do |dir|
+        destination = "#{dir}/#{context[:cluster]}/#{app}"
+        revision = ENV['REVISION']
+
+        if revision.nil?
+          current_revision = ask_cluster_for_current_revision
+          exit(1) unless HighLine.agree("No REVISION specified. Use current deployment revision '#{current_revision}'?")
+          revision = current_revision
+        end

-
-
-
-
+        replacement_hash = {
+          'REVISION' => revision,
+          'RESTARTED_AT_VALUE' => "Initial Deploy for #{revision}"
+        }

-
-
-
+        if restart
+          replacement_hash['RESTARTED_AT_VALUE'] = Time.now.to_s
+        end

-
-
-
-
-
+        replacement_hash.each do |k, v|
+          next unless v.nil? || v == ''
+          puts "Found nil or blank value for replacement hash key #{k}. Aborting!"
+          exit(1)
+        end

-
-
-
-
-
+        find_and_replace_revision(
+          source: "kubernetes/#{context[:cluster]}/#{app}",
+          destination: destination,
+          replacement_hash: replacement_hash
+        )

-
-
+        puts "Running 'kubectl apply -f #{destination}'"
+        system("kubectl apply -f #{destination}")
+      end
     end

     def run_scale
@@ -128,12 +136,7 @@ module Seira
       main_secret_name = Seira::Secrets.new(app: app, action: action, args: args, context: context).main_secret_name

       # 'internal' is a unique cluster/project "cluster". It always means production in terms of rails app.
-      rails_env =
-        if context[:cluster] == 'internal'
-          'production'
-        else
-          context[:cluster]
-        end
+      rails_env = Helpers.rails_env(context: context)

       puts `kubectl create secret generic #{main_secret_name} --namespace #{app} --from-literal=RAILS_ENV=#{rails_env} --from-literal=RACK_ENV=#{rails_env}`
     end
@@ -153,16 +156,26 @@ module Seira
     end

     def find_and_replace_revision(source:, destination:, replacement_hash:)
-      puts "Copying source yaml from #{source} to
+      puts "Copying source yaml from #{source} to temp folder"
       FileUtils.mkdir_p destination # Create the nested directory
       FileUtils.rm_rf("#{destination}/.", secure: true) # Clean out old files from the tmp folder
       FileUtils.copy_entry source, destination
+      # Anything in jobs directory is not intended to be applied when deploying
+      # the app, but rather ran when needed as Job objects. Force to avoid exception if DNE.
+      FileUtils.rm_rf("#{destination}/jobs/") if File.directory?("#{destination}/jobs/")

       # Iterate through each yaml file and find/replace and save
-      puts "Iterating
+      puts "Iterating temp folder files find/replace revision information"
       Dir.foreach(destination) do |item|
         next if item == '.' || item == '..'

+        # If we have run into a directory item, skip it
+        next if File.directory?("#{destination}/#{item}")
+
+        # Skip any manifest file that has "seira-skip.yaml" at the end. Common use case is for Job definitions
+        # to be used in "seira staging <app> jobs run"
+        next if item.end_with?("seira-skip.yaml")
+
         text = File.read("#{destination}/#{item}")

         new_contents = text

data/lib/seira/cluster.rb CHANGED

@@ -5,7 +5,7 @@ require 'fileutils'
 # Example usages:
 module Seira
   class Cluster
-    VALID_ACTIONS = %w[help bootstrap].freeze
+    VALID_ACTIONS = %w[help bootstrap upgrade].freeze
     SUMMARY = "For managing whole clusters.".freeze

     attr_reader :action, :args, :context, :settings
@@ -23,6 +23,8 @@ module Seira
         run_help
       when 'bootstrap'
         run_bootstrap
+      when 'upgrade'
+        run_upgrade
       else
         fail "Unknown command encountered"
       end
@@ -78,5 +80,129 @@ module Seira
       puts `kubectl create secret docker-registry gcr-secret --docker-username=_json_key --docker-password="$(cat #{dockercfg_location})" --docker-server=https://gcr.io --docker-email=doesnotmatter@example.com`
       puts `kubectl create secret generic cloudsql-credentials --namespace default --from-file=credentials.json=#{cloudsql_credentials_location}`
     end
+
+    def run_upgrade
+      cluster = context[:cluster]
+
+      # Take a single argument, which is the version to upgrade to
+      new_version = args[0]
+      if new_version.nil?
+        puts 'must specify version to upgrade to'
+        exit(1)
+      end
+
+      # Ensure the specified version is supported by GKE
+      server_config = JSON.parse(`gcloud container get-server-config --format json`)
+      valid_versions = server_config['validMasterVersions']
+      unless valid_versions.include? new_version
+        puts "Version #{new_version} is unsupported. Supported versions are:"
+        puts valid_versions
+        exit(1)
+      end
+
+      cluster_config = JSON.parse(`gcloud container clusters describe #{cluster} --format json`)
+
+      # Update the master node first
+      puts 'updating master (this may take a while)'
+      if cluster_config['currentMasterVersion'] == new_version
+        # Master has already been updated; this step is not needed
+        puts 'already up to date'
+      elsif system("gcloud container clusters upgrade #{cluster} --cluster-version=#{new_version} --master")
+        puts 'master updated successfully'
+      else
+        puts 'failed to update master'
+        exit(1)
+      end
+
+      # Figure out what our current node pool setup is. The goal here is to be able to re-run this
+      # command if it fails partway through, and have it pick up where it left off.
+      pools = JSON.parse(`gcloud container node-pools list --cluster #{cluster} --format json`)
+      if pools.length == 2
+        # We have two node pools. Assume this is due to the upgrade process already being started,
+        # so we have one pool with the old version and one pool with the new version.
+        old_pool = pools.find { |p| p['version'] != new_version }
+        new_pool = pools.find { |p| p['version'] == new_version }
+        if old_pool.nil? || new_pool.nil?
+          # Turns out the two pools are not the result of a partially-finished upgrade; in this
+          # case we give up and the upgrade will have to proceed manually.
+          puts 'Unsupported node pool setup: could not find old and new pool'
+          exit(1)
+        end
+      elsif pools.length == 1
+        # Only one pool is the normal case; set old_pool and that's it.
+        old_pool = pools.first
+      else
+        # If we have three or more or zero pools, upgrade will have to proceed manually.
+        puts 'Unsupported node pool setup: unexpected number of pools'
+        exit(1)
+      end
+      # Get names of the nodes in the old node pool
+      old_nodes = `kubectl get nodes -l cloud.google.com/gke-nodepool=#{old_pool['name']} -o name`.split("\n")

+      # If we don't already have a new pool (i.e. one with the new version), create one
+      if new_pool.nil?
+        # Pick a name for the new pool, alternating between blue and green
+        new_pool_name = old_pool['name'] == 'blue' ? 'green' : 'blue'
+
+        # Create a new node pool with all the same settings as the old one. The version of the new
+        # pool will match the master version, which has already been updated.
+        puts 'creating new node pool'
+        command =
+          "gcloud container node-pools create #{new_pool_name} \
+            --cluster=#{cluster} \
+            --disk-size=#{old_pool['config']['diskSizeGb']} \
+            --image-type=#{old_pool['config']['imageType']} \
+            --machine-type=#{old_pool['config']['machineType']} \
+            --num-nodes=#{old_nodes.count} \
+            --service-account=#{old_pool['serviceAccount']}"
+        # TODO: support autoscaling if old pool has it turned on
+        if system(command)
+          puts 'new pool created successfully'
+        else
+          puts 'failed to create new pool'
+          exit(1)
+        end
+      end
+
+      # Cordon all the nodes in the old pool, preventing new workloads from being sent to them
+      puts 'cordoning old nodes'
+      old_nodes.each do |node|
+        unless system("kubectl cordon #{node}")
+          puts "failed to cordon node #{node}"
+          exit(1)
+        end
+      end
+
+      # Drain all the nodes in the old pool, moving workloads off of them gradually while
+      # respecting maxUnavailable etc.
+      puts 'draining old nodes'
+      old_nodes.each do |node|
+        # --force deletes pods that aren't managed by a ReplicationController, Job, or DaemonSet,
+        # which shouldn't be any besides manually created temp pods
+        # --ignore-daemonsets prevents failing due to presence of DaemonSets, which cannot be moved
+        # because they're tied to a specific node
+        # --delete-local-data prevents failing due to presence of local data, which cannot be moved
+        # but is bad practice to use for anything that can't be lost
+        puts "draining #{node}"
+        unless system("kubectl drain --force --ignore-daemonsets --delete-local-data #{node}")
+          puts "failed to drain node #{node}"
+          exit(1)
+        end
+      end
+
+      # All workloads which can be moved have been moved off of old node pool have been moved, so
+      # that node pool can be deleted, leaving only the new pool with the new version
+      if HighLine.agree('Delete old node pool?')
+        puts 'deleting old node pool'
+        if system("gcloud container node-pools delete #{old_pool['name']} --cluster #{cluster}")
+          puts 'old pool deleted successfully'
+        else
+          puts 'failed to delete old pool'
+          exit(1)
+        end
+      end
+
+      puts 'upgrade complete!'
+    end
   end
 end

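The new upgrade flow is a blue/green node-pool rotation: upgrade the master, create a second pool (named `blue` or `green`) at the new version, cordon and drain the old nodes, and then optionally delete the old pool. Assuming cluster-level commands follow the usual `seira <cluster> cluster <action>` form, it would be invoked as something like `seira production cluster upgrade 1.8.7-gke.1`, where the GKE version string is only a placeholder.
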
data/lib/seira/db/create.rb CHANGED

@@ -61,6 +61,8 @@ module Seira
           @replica_for = arg.split('=')[1] # TODO: Read secret to get it automatically, but allow for fallback
         elsif arg.start_with? '--highly-available'
           @make_highly_available = true
+        elsif arg.start_with? '--database-name='
+          @database_name = arg.split('=')[1]
         elsif /^--[\w\-]+=.+$/.match? arg
           create_command += " #{arg}"
         else
@@ -195,6 +197,10 @@ module Seira
        name.gsub("handshake-", "")
      end

+      def default_database_name
+        "#{app}_#{Helpers.rails_env(context: context)}"
+      end
+
      def write_pgbouncer_yaml
        # TODO: Clean this up by moving into a proper templated yaml file
        pgbouncer_yaml = <<-FOO
@@ -244,7 +250,7 @@ spec:
        database: #{name}
    spec:
      containers:
-        - image: handshake/pgbouncer:0.
+        - image: handshake/pgbouncer:0.2.0
          name: pgbouncer
          ports:
            - containerPort: 6432
@@ -254,9 +260,14 @@ spec:
              name: #{pgbouncer_configs_name}
          - secretRef:
              name: #{pgbouncer_secret_name}
+          env:
+            - name: "PGPORT"
+              value: "6432"
+            - name: "PGDATABASE"
+              value: "#{@database_name || default_database_name}"
          readinessProbe:
-
-
+            exec:
+              command: ["psql", "-c", "SELECT 1;"]
            initialDelaySeconds: 5
            periodSeconds: 10
          livenessProbe:

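With the new flag, provisioning can point pgbouncer at a specific database, e.g. something like `seira staging myapp db create --database-name=myapp_analytics` (the app and database names here are placeholders). Without the flag, `default_database_name` falls back to `<app>_<rails_env>`.
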
data/lib/seira/jobs.rb ADDED

@@ -0,0 +1,145 @@
+require 'json'
+
+module Seira
+  class Jobs
+    VALID_ACTIONS = %w[help list delete run].freeze
+    SUMMARY = "Manage your application's jobs.".freeze
+
+    attr_reader :app, :action, :args, :job_name, :context
+
+    def initialize(app:, action:, args:, context:)
+      @app = app
+      @action = action
+      @context = context
+      @args = args
+      @job_name = args[0]
+    end
+
+    def run
+      case action
+      when 'help'
+        run_help
+      when 'list'
+        run_list
+      when 'delete'
+        run_delete
+      when 'run'
+        run_run
+      else
+        fail "Unknown command encountered"
+      end
+    end
+
+    private
+
+    def run_help
+      puts SUMMARY
+      puts "\n\n"
+      puts "TODO"
+    end
+
+    def run_list
+      puts `kubectl get jobs --namespace=#{app} -o wide`
+    end
+
+    def run_delete
+      puts `kubectl delete job #{job_name} --namespace=#{app}`
+    end
+
+    def run_run
+      gcp_app = App.new(app: app, action: 'apply', args: [""], context: context)
+
+      # Set defaults
+      async = false # Wait for job to finish before continuing.
+      no_delete = false # Delete at end
+
+      # Loop through args and process any that aren't just the command to run
+      loop do
+        arg = args.first
+        if arg.nil?
+          puts 'Please specify a command to run'
+          exit(1)
+        end
+
+        break unless arg.start_with? '--'
+
+        if arg == '--async'
+          async = true
+        elsif arg == '--no-delete'
+          no_delete = true
+        else
+          puts "Warning: Unrecognized argument #{arg}"
+        end
+
+        args.shift
+      end
+
+      if async && !no_delete
+        puts "Cannot delete Job after running if Job is async, since we don't know when it finishes."
+        exit(1)
+      end
+
+      # TODO: Configurable CPU and memory by args such as large, small, xlarge.
+      command = args.join(' ')
+      unique_name = "#{app}-run-#{Random.unique_name}"
+      revision = gcp_app.ask_cluster_for_current_revision # TODO: Make more reliable, especially with no web tier
+      replacement_hash = {
+        'UNIQUE_NAME' => unique_name,
+        'REVISION' => revision,
+        'COMMAND' => command.split(' ').map { |part| "\"#{part}\"" }.join(", "),
+        'CPU_REQUEST' => '200m',
+        'CPU_LIMIT' => '500m',
+        'MEMORY_REQUEST' => '500Mi',
+        'MEMORY_LIMIT' => '1Gi',
+      }
+
+      source = "kubernetes/#{context[:cluster]}/#{app}" # TODO: Move to method in app.rb
+      Dir.mktmpdir do |destination|
+        revision = ENV['REVISION']
+        file_name = "template.yaml"
+
+        FileUtils.mkdir_p destination # Create the nested directory
+        FileUtils.copy_file "#{source}/jobs/#{file_name}", "#{destination}/#{file_name}"
+
+        # TOOD: Move this into a method since it is copied from app.rb
+        text = File.read("#{destination}/#{file_name}")
+        new_contents = text
+        replacement_hash.each do |key, value|
+          new_contents.gsub!(key, value)
+        end
+        File.open("#{destination}/#{file_name}", 'w') { |file| file.write(new_contents) }
+
+        puts "Running 'kubectl apply -f #{destination}'"
+        system("kubectl apply -f #{destination}")
+      end
+
+      unless async
+        # Check job status until it's finished
+        print 'Waiting for job to complete...'
+        job_spec = nil
+        loop do
+          job_spec = JSON.parse(`kubectl --namespace=#{app} get job #{unique_name} -o json`)
+          break if !job_spec['status']['succeeded'].nil? || !job_spec['status']['failed'].nil?
+          print '.'
+          sleep 3
+        end
+
+        status =
+          if !job_spec['status']['succeeded'].nil?
+            "succeeded"
+          elsif !job_spec['status']['failed'].nil?
+            "failed"
+          else
+            "unknown"
+          end
+
+        if no_delete
+          puts "Job finished with status #{status}. Leaving Job object in cluster, clean up manually when confirmed."
+        else
+          print "Job finished with status #{status}. Deleting Job from cluster for cleanup."
+          system("kubectl delete job #{unique_name} -n #{app}")
+        end
+      end
+    end
+  end
+end

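The `run` action copies `jobs/template.yaml` from the app's manifest directory, substitutes the placeholder tokens listed in `replacement_hash` (UNIQUE_NAME, REVISION, COMMAND, and the CPU/memory requests and limits), and applies the result as a one-off Kubernetes Job. Assuming the usual `seira <cluster> <app> <category> <action>` invocation, a run would look something like `seira staging myapp jobs run rake db:migrate` (the app name and command are placeholders), with the optional `--async` and `--no-delete` flags placed before the command.
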
data/lib/seira/memcached.rb CHANGED

@@ -103,12 +103,12 @@ module Seira
        end
      end

-
-
-
-
-
-
+      Dir.mktmpdir do |dir|
+        file_name = write_config(dir: dir, values: values)
+        unique_name = Seira::Random.unique_name(existing_instances)
+        name = "#{app}-memcached-#{unique_name}"
+        puts `helm install --namespace #{app} --name #{name} --wait -f #{file_name} stable/memcached`
+      end

      puts "To get status: 'seira #{context[:cluster]} #{app} memcached status #{unique_name}'"
      puts "Service URI for this memcached instance: 'memcached://#{name}-memcached:11211'."
@@ -131,8 +131,8 @@ module Seira
        end
      end

-    def write_config(values)
-      file_name = "
+    def write_config(dir:, values:)
+      file_name = "#{dir}/temp-memcached-config-#{Seira::Cluster.current_cluster}-#{app}.json"
      File.open(file_name, "wb") do |f|
        f.write(values.to_json)
      end

data/lib/seira/redis.rb CHANGED

@@ -122,12 +122,12 @@ module Seira
        end
      end

-
-
-
-
-
-
+      Dir.mktmpdir do |dir|
+        file_name = write_config(dir: dir, values: values)
+        unique_name = Seira::Random.unique_name(existing_instances)
+        name = "#{app}-redis-#{unique_name}"
+        puts `helm install --namespace #{app} --name #{name} --wait -f #{file_name} stable/redis`
+      end

      puts "To get status: 'seira #{context[:cluster]} #{app} redis status #{unique_name}'"
      puts "To get credentials for storing in app secrets: 'seira #{context[:cluster]} #{app} redis credentials #{unique_name}'"
@@ -151,8 +151,8 @@ module Seira
        end
      end

-    def write_config(values)
-      file_name = "
+    def write_config(dir:, values:)
+      file_name = "#{dir}/temp-redis-config-#{Seira::Cluster.current_cluster}-#{app}.json"
      File.open(file_name, "wb") do |f|
        f.write(values.to_json)
      end

data/lib/seira/secrets.rb CHANGED

@@ -138,22 +138,22 @@ module Seira
     # In the normal case the secret we are updating is just main_secret_name,
     # but in special cases we may be doing an operation on a different secret
     def write_secrets(secrets:, secret_name: main_secret_name)
-
-
-
+      Dir.mktmpdir do |dir|
+        file_name = "#{dir}/temp-secrets-#{Seira::Cluster.current_cluster}-#{secret_name}.json"
+        File.open(file_name, "w") do |f|
+          f.write(secrets.to_json)
+        end
+
+        # The command we use depends on if it already exists or not
+        secret_exists = system("kubectl get secret #{secret_name} --namespace #{app} > /dev/null")
+        command = secret_exists ? "replace" : "create"
+
+        if system("kubectl #{command} --namespace #{app} -f #{file_name}")
+          puts "Successfully created/replaced #{secret_name} secret #{key} in cluster #{Seira::Cluster.current_cluster}"
+        else
+          puts "Failed to update secret"
+        end
       end
-
-      # The command we use depends on if it already exists or not
-      secret_exists = system("kubectl get secret #{secret_name} --namespace #{app} > /dev/null")
-      command = secret_exists ? "replace" : "create"
-
-      if system("kubectl #{command} --namespace #{app} -f #{file_name}")
-        puts "Successfully created/replaced #{secret_name} secret #{key} in cluster #{Seira::Cluster.current_cluster}"
-      else
-        puts "Failed to update secret"
-      end
-
-      File.delete(file_name)
     end

     # Returns the still-base64encoded secrets hashmap

data/lib/seira/settings.rb CHANGED

@@ -24,8 +24,12 @@ module Seira
      settings['seira']['default_zone']
    end

-    def
-      settings['seira']['
+    def applications
+      settings['seira']['applications'].map { |app| app['name'] }
+    end
+
+    def config_for_app(app_name)
+      settings['seira']['applications'].find { |app| app['name'] == app_name }
    end

    def valid_cluster_names

data/lib/seira/version.rb CHANGED

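The version.rb diff body is not included in this listing. Given the release title and the gemspec metadata below, the change is presumably just the version bump inside `module Seira`, roughly:

-  VERSION = "0.1.7"
+  VERSION = "0.2.0"
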
metadata CHANGED

@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: seira
 version: !ruby/object:Gem::Version
-  version: 0.1.7
+  version: 0.2.0
 platform: ruby
 authors:
 - Scott Ringwelski
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-01-
+date: 2018-01-27 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: highline
@@ -116,11 +116,13 @@ files:
 - bin/console
 - bin/seira
 - bin/setup
+- lib/helpers.rb
 - lib/seira.rb
 - lib/seira/app.rb
 - lib/seira/cluster.rb
 - lib/seira/db.rb
 - lib/seira/db/create.rb
+- lib/seira/jobs.rb
 - lib/seira/memcached.rb
 - lib/seira/pods.rb
 - lib/seira/proxy.rb