seira 0.3.7 → 0.4.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: a66eccb73df5cd20cda9716c793546964c39698a
- data.tar.gz: a9aa6dd6d05feff2597790ed469ad532beb19ef3
+ metadata.gz: 6bcdddd7385af9bd0c795472c6f1fe911d9586b1
+ data.tar.gz: ad65f785990edea62925605e04bba6b4d116cd93
  SHA512:
- metadata.gz: 36eb393a778a70f2147012d9cbe9698c5293bfc743a9349e5cd2053df7a09d8fcd7cd0c28566fb607d710cee97da9a873210617066ea5c4c77a820245ad1c6e7
- data.tar.gz: afcbbed1d2388ef1c635aa010a30f486ddacf3d11e2e100018f22e964c621a626661bde901d54b1c612562eeeac135003fabab145576f3034bb20204f1e95ecc
+ metadata.gz: a7c87cba6fa27f9debf2f11b0db52ecab602465e92e77c1a9774f875152d79c1b11b996d05dfcfbc671731a5557b2fb975031dad69c08a5709892e1b79593eb0
+ data.tar.gz: 5e8d13d180754d03d0abeb381047efae3ae65eb795bb6320f36fe3e5ce81915ac2ca530108571ae90451f1a1c0aa7dd6b760ea1b1e4ac35f9ac779acada61ae5
data/lib/helpers.rb CHANGED
@@ -11,23 +11,23 @@ module Seira
  end
  end
 
- def fetch_pods(filters:, app:)
- filter_string = { app: app }.merge(filters).map { |k, v| "#{k}=#{v}" }.join(',')
- output = Seira::Commands.kubectl("get pods -o json --selector=#{filter_string}", context: { app: app }, return_output: true)
+ def fetch_pods(filters:, context:)
+ filter_string = { app: context[:app] }.merge(filters).map { |k, v| "#{k}=#{v}" }.join(',')
+ output = Seira::Commands.kubectl("get pods -o json --selector=#{filter_string}", context: context, return_output: true)
  JSON.parse(output)['items']
  end
 
- def log_link(context:, app:, query:)
+ def log_link(context:, query:)
  link = context[:settings].log_link_format
  return nil if link.nil?
- link.gsub! 'APP', app
+ link.gsub! 'APP', context[:app]
  link.gsub! 'CLUSTER', context[:cluster]
  link.gsub! 'QUERY', query
  link
  end
 
- def get_secret(app:, key:, context: {})
- Secrets.new(app: app, action: 'get', args: [], context: context).get(key)
+ def get_secret(key:, context:)
+ Secrets.new(app: context[:app], action: 'get', args: [], context: context).get(key)
  end
  end
  end
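
Note on the helpers change: `fetch_pods`, `log_link`, and `get_secret` now take the full `context` hash (which carries `:app` alongside `:cluster` and `:settings`) instead of a separate `app:` keyword, so call sites pass a single object. A minimal sketch of the new call shape, assuming a hand-built context hash and that the module is addressable as `Seira::Helpers` (real contexts are assembled by seira's runner):

    # Hypothetical context for illustration; fetch_pods only reads :app from it.
    context = { app: 'myapp', cluster: 'staging' }

    # Builds the selector "app=myapp,tier=web" and returns the parsed pod items.
    web_pods = Seira::Helpers.fetch_pods(context: context, filters: { tier: 'web' })

    # Secret lookup now derives the app from the same context object.
    database_url = Seira::Helpers.get_secret(context: context, key: 'DATABASE_URL')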
data/lib/seira/app.rb CHANGED
@@ -77,9 +77,16 @@ module Seira
  # Kube vanilla based upgrade
  def run_apply(restart: false)
  async = false
+ revision = nil
+ deployment = :all
+
  args.each do |arg|
  if arg == '--async'
  async = true
+ elsif arg.start_with? '--deployment='
+ deployment = arg.split('=')[1]
+ elsif revision.nil?
+ revision = arg
  else
  puts "Warning: unrecognized argument #{arg}"
  end
@@ -87,7 +94,7 @@ module Seira
 
  Dir.mktmpdir do |dir|
  destination = "#{dir}/#{context[:cluster]}/#{app}"
- revision = args.first || ENV['REVISION']
+ revision ||= ENV['REVISION']
 
  if revision.nil?
  current_revision = ask_cluster_for_current_revision
@@ -116,12 +123,20 @@ module Seira
  replacement_hash: replacement_hash
  )
 
- kubectl("apply -f #{destination}", context: context)
+ to_apply = destination
+ to_apply += "/#{deployment}.yaml" unless deployment == :all
+ kubectl("apply -f #{to_apply}", context: context)
 
  unless async
  puts "Monitoring rollout status..."
  # Wait for rollout of all deployments to complete (running `kubectl rollout status` in parallel via xargs)
- exit 1 unless system("kubectl get deployments -n #{app} -o name | xargs -n1 -P10 kubectl rollout status -n #{app}")
+ rollout_wait_command =
+ if deployment == :all
+ "kubectl get deployments -n #{app} -o name | xargs -n1 -P10 kubectl rollout status -n #{app}"
+ else
+ "kubectl rollout status -n #{app} deployments/#{app}-#{deployment}"
+ end
+ exit 1 unless system(rollout_wait_command)
  end
  end
  end
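
`app apply` gains a `--deployment=` flag: when given, only that deployment's manifest is applied and only its rollout is watched, while the first bare argument is still treated as the revision. A runnable sketch of the new argument handling, using made-up inputs (`web`, `abc123`, and a stand-in destination directory):

    deployment = :all
    revision = nil

    ['--deployment=web', 'abc123'].each do |arg|
      if arg.start_with? '--deployment='
        deployment = arg.split('=')[1]
      elsif revision.nil?
        revision = arg # first non-flag argument is the revision
      end
    end

    to_apply = '/tmp/manifests/staging/myapp' # stand-in for the real tmpdir destination
    to_apply += "/#{deployment}.yaml" unless deployment == :all

    puts to_apply # => "/tmp/manifests/staging/myapp/web.yaml"
    puts revision # => "abc123"

With `--deployment=web`, rollout monitoring narrows to `kubectl rollout status -n myapp deployments/myapp-web`, relying on the convention visible above that deployments are named `<app>-<deployment>`.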
data/lib/seira/db.rb CHANGED
@@ -47,7 +47,7 @@ module Seira
 
  # NOTE: Relies on the pgbouncer instance being named based on the db name, as is done in create command
  def primary_instance
- database_url = Helpers.get_secret(app: app, key: 'DATABASE_URL')
+ database_url = Helpers.get_secret(context: context, key: 'DATABASE_URL')
  return nil unless database_url
 
  primary_uri = URI.parse(database_url)
@@ -105,7 +105,7 @@ module Seira
  def run_connect
  name = args[0] || primary_instance
  puts "Connecting to #{name}..."
- root_password = Helpers.get_secret(app: app, key: "#{name.tr('-', '_').upcase}_ROOT_PASSWORD") || "Not found in secrets"
+ root_password = Helpers.get_secret(context: context, key: "#{name.tr('-', '_').upcase}_ROOT_PASSWORD") || "Not found in secrets"
  puts "Your root password for 'postgres' user is: #{root_password}"
  system("gcloud sql connect #{name}")
  end
@@ -239,7 +239,7 @@ module Seira
  # TODO(josh): move pgbouncer naming logic here and in Create to a common location
  instance_name = primary_instance
  tier = instance_name.gsub("#{app}-", '')
- matching_pods = Helpers.fetch_pods(app: app, filters: { tier: tier })
+ matching_pods = Helpers.fetch_pods(context: context, filters: { tier: tier })
  if matching_pods.empty?
  puts 'Could not find pgbouncer pod to connect to'
  exit 1
@@ -247,7 +247,7 @@
  pod_name = matching_pods.first['metadata']['name']
  psql_command =
  if as_admin
- root_password = Helpers.get_secret(app: app, key: "#{instance_name.tr('-', '_').upcase}_ROOT_PASSWORD")
+ root_password = Helpers.get_secret(context: context, key: "#{instance_name.tr('-', '_').upcase}_ROOT_PASSWORD")
  "psql postgres://postgres:#{root_password}@127.0.0.1:5432"
  else
  'psql'
data/lib/seira/db/create.rb CHANGED
@@ -161,12 +161,12 @@ module Seira
  create_pgbouncer_secret(db_user: 'proxyuser', db_password: proxyuser_password)
  Secrets.new(app: app, action: 'set', args: ["#{env_name}_ROOT_PASSWORD=#{root_password}"], context: context).run
  # Set DATABASE_URL if not already set
- write_database_env(key: "DATABASE_URL", db_user: 'proxyuser', db_password: proxyuser_password) if Helpers.get_secret(app: app, key: "DATABASE_URL").nil?
+ write_database_env(key: "DATABASE_URL", db_user: 'proxyuser', db_password: proxyuser_password) if Helpers.get_secret(context: context, key: "DATABASE_URL").nil?
  write_database_env(key: "#{env_name}_DB_URL", db_user: 'proxyuser', db_password: proxyuser_password)
  else
  # When creating a replica, we cannot manage users on the replica. We must manage the users on the primary, which the replica
  # inherits. For now we will use the same credentials that the primary uses.
- primary_uri = URI.parse(Helpers.get_secret(app: app, key: 'DATABASE_URL'))
+ primary_uri = URI.parse(Helpers.get_secret(context: context, key: 'DATABASE_URL'))
  primary_user = primary_uri.user
  primary_password = primary_uri.password
  create_pgbouncer_secret(db_user: primary_user, db_password: primary_password)
@@ -238,7 +238,7 @@ metadata:
  tier: #{pgbouncer_tier}
  database: #{name}
  spec:
- replicas: 1
+ replicas: 2
  minReadySeconds: 20
  strategy:
  type: RollingUpdate
@@ -281,7 +281,7 @@ spec:
  resources:
  requests:
  cpu: 100m
- memory: 300m
+ memory: 300Mi
  - image: gcr.io/cloudsql-docker/gce-proxy:1.11 # Gcloud SQL Proxy
  name: cloudsql-proxy
  command:
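
Two small but significant fixes in the pgbouncer template above: the Deployment now runs two replicas instead of one, so database connections no longer depend on a single pod surviving node drains and rolling updates, and the memory request uses the correct Kubernetes unit. In Kubernetes resource quantities the lowercase `m` suffix means milli (thousandths), so `memory: 300m` requested 0.3 bytes, effectively no reservation at all; `Mi` is mebibytes. The corrected stanza, with the units spelled out (YAML as embedded in the Ruby heredoc template):

    resources:
      requests:
        cpu: 100m      # 100 millicores = 0.1 CPU; "m" is the right suffix for CPU
        memory: 300Mi  # 300 mebibytes; "300m" would mean 0.3 bytes of memory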
data/lib/seira/jobs.rb CHANGED
@@ -67,6 +67,7 @@ module Seira
 
  if arg == '--async'
  async = true
+ no_delete = true
  elsif arg == '--no-delete'
  no_delete = true
  else
@@ -76,11 +77,6 @@ module Seira
  args.shift
  end
 
- if async && !no_delete
- puts "Cannot delete Job after running if Job is async, since we don't know when it finishes."
- exit(1)
- end
-
  # TODO: Configurable CPU and memory by args such as large, small, xlarge.
  command = args.join(' ')
  unique_name = "#{app}-run-#{Random.unique_name}"
@@ -112,7 +108,7 @@ module Seira
  File.open("#{destination}/#{file_name}", 'w') { |file| file.write(new_contents) }
 
  kubectl("apply -f #{destination}", context: context)
- log_link = Helpers.log_link(context: context, app: app, query: unique_name)
+ log_link = Helpers.log_link(context: context, query: unique_name)
  puts "View logs at: #{log_link}" unless log_link.nil?
  end
 
@@ -142,6 +138,9 @@ module Seira
  print "Job finished with status #{status}. Deleting Job from cluster for cleanup."
  kubectl("delete job #{unique_name}", context: context)
  end
+
+ # If the job did not succeed, exit nonzero so calling scripts know something went wrong
+ exit(1) unless status == "succeeded"
  end
  end
  end
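
The jobs change inverts the old behavior: rather than rejecting `--async` without `--no-delete`, `--async` now simply implies `--no-delete`, since an async run returns before the Job finishes and can never know when deletion would be safe. Synchronous runs additionally exit nonzero when the Job fails. A small runnable sketch of the revised flag logic, with a made-up args array:

    # Hypothetical input; in seira these come from the CLI args list.
    args = ['--async']
    async = false
    no_delete = false

    args.each do |arg|
      if arg == '--async'
        async = true
        no_delete = true # async implies no-delete: we won't be around to clean up
      elsif arg == '--no-delete'
        no_delete = true
      end
    end

    # For synchronous runs, a non-"succeeded" terminal status now propagates as a
    # nonzero exit so wrapping scripts (e.g. deploy pipelines) see the failure:
    # exit(1) unless status == "succeeded"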
data/lib/seira/pods.rb CHANGED
@@ -63,110 +63,89 @@ module Seira
  end
 
  def run_connect
- # If a pod name is specified, connect to that pod; otherwise pick a random web pod
- target_pod_name = pod_name || Helpers.fetch_pods(app: app, filters: { tier: 'web' }).sample&.dig('metadata', 'name')
+ tier = nil
+ pod_name = nil
+ dedicated = false
+ command = 'sh'
 
- if target_pod_name
- connect_to_pod(target_pod_name)
- else
- puts "Could not find web pod to connect to"
- end
- end
-
- def run_run
- # Set defaults
- tier = 'web'
- clear_commands = false
- detached = false
- container_name = app
-
- # Loop through args and process any that aren't just the command to run
- loop do
- arg = args.first
- if arg.nil?
- puts 'Please specify a command to run'
- exit(1)
- end
- break unless arg.start_with? '--'
+ args.each do |arg|
  if arg.start_with? '--tier='
  tier = arg.split('=')[1]
- elsif arg == '--clear-commands'
- clear_commands = true
- elsif arg == '--detached'
- detached = true
- elsif arg.start_with? '--container='
- container_name = arg.split('=')[1]
+ elsif arg.start_with? '--pod='
+ pod_name = arg.split('=')[1]
+ elsif arg.start_with? '--command='
+ command = arg.split('=')[1..-1].join('=')
+ elsif arg == '--dedicated'
+ dedicated = true
  else
  puts "Warning: Unrecognized argument #{arg}"
  end
- args.shift
  end
 
- # Any remaining args are the command to run
- command = args.join(' ')
-
- # Find a 'template' pod from the proper tier
- template_pod = Helpers.fetch_pods(app: app, filters: { tier: tier }).first
- if template_pod.nil?
- puts "Unable to find #{tier} tier pod to copy config from"
+ # If a pod name is specified, connect to that pod
+ # If a tier is specified, connect to a random pod from that tier
+ # Otherwise connect to a terminal pod
+ target_pod = pod_name || Helpers.fetch_pods(context: context, filters: { tier: tier || 'terminal' }).sample
+ if target_pod.nil?
+ puts 'Could not find pod to connect to'
  exit(1)
  end
 
- # Use that template pod's configuration to create a new temporary pod
- temp_name = "#{app}-temp-#{Random.unique_name}"
- spec = template_pod['spec']
- temp_pod = {
- apiVersion: template_pod['apiVersion'],
- kind: 'Pod',
- spec: spec,
- metadata: {
- name: temp_name
+ if dedicated
+ # Create a dedicated temp pod to run in
+ # This is useful if you would like to have a persistent connection that doesn't get killed
+ # when someone updates the terminal deployment, or if you want to avoid noisy neighbors
+ # connected to the same pod.
+ temp_name = "temp-#{Random.unique_name}"
+
+ # Construct a spec for the temp pod
+ spec = target_pod['spec']
+ temp_pod = {
+ apiVersion: target_pod['apiVersion'],
+ kind: 'Pod',
+ spec: spec,
+ metadata: {
+ name: temp_name
+ }
  }
- }
- spec['restartPolicy'] = 'Never'
- if clear_commands
+ # Don't restart the pod when it dies
+ spec['restartPolicy'] = 'Never'
+ # Overwrite container commands with something that times out, so if the client disconnects
+ # there's a limited amount of time that the temp pod is still taking up resources
+ # Note that this will break pods that depend on containers running real commands, but
+ # for a simple terminal pod it's fine
  spec['containers'].each do |container|
- container['command'] = ['bash', '-c', 'tail -f /dev/null']
+ container['command'] = ['sleep', '86400'] # 86400 seconds = 24 hours
  end
- end
 
- if detached
- target_container = spec['containers'].find { |container| container['name'] == container_name }
- if target_container.nil?
- puts "Could not find container '#{container_name}' to run command in"
+ puts 'Creating dedicated pod...'
+ unless system("kubectl --namespace=#{app} create -f - <<JSON\n#{temp_pod.to_json}\nJSON")
+ puts 'Failed to create dedicated pod'
  exit(1)
  end
- target_container['command'] = ['bash', '-c', command]
- end
-
- puts "Creating temporary pod #{temp_name}"
- unless system("kubectl --namespace=#{app} create -f - <<JSON\n#{temp_pod.to_json}\nJSON")
- puts 'Failed to create pod'
- exit(1)
- end
 
- unless detached
- # Check pod status until it's ready to connect to
- print 'Waiting for pod to start...'
+ print 'Waiting for dedicated pod to start...'
  loop do
- pod = JSON.parse(`kubectl --namespace=#{app} get pods/#{temp_name} -o json`)
+ pod = JSON.parse(kubectl("get pods/#{temp_name} -o json", context: context, return_output: true))
  break if pod['status']['phase'] == 'Running'
  print '.'
  sleep 1
  end
  print "\n"
 
- # Connect to the pod, running the specified command
  connect_to_pod(temp_name, command)
 
- # Clean up
- unless system("kubectl --namespace=#{app} delete pod #{temp_name}")
- puts "Warning: failed to clean up pod #{temp_name}"
+ # Clean up on disconnect so temp pod isn't taking up resources
+ unless kubectl("delete pods/#{temp_name}", context: context)
+ puts 'Failed to delete temp pod'
  end
+ else
+ # If we don't need a dedicated pod, it's way easier - just connect to the already running one
+ connect_to_pod(target_pod.dig('metadata', 'name'))
  end
  end
 
- def connect_to_pod(name, command = 'bash')
+ def connect_to_pod(name, command = 'sh')
  puts "Connecting to #{name}..."
  system("kubectl exec -ti #{name} --namespace=#{app} -- #{command}")
  end
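
The pods rewrite folds the old `run_run` template-pod machinery into `run_connect`: a pod is chosen by `--pod=`, by `--tier=`, or from a default `terminal` tier, and `--dedicated` clones that pod's spec into a throwaway Pod whose containers just run `sleep 86400`, so an abandoned session stops consuming cluster resources within a day. A runnable sketch of how the dedicated manifest is derived, with a stubbed pod standing in for kubectl's JSON output and `SecureRandom` standing in for `Random.unique_name`:

    require 'json'
    require 'securerandom'

    # Stub of a fetched pod; a real one comes from `kubectl get pods -o json`.
    target_pod = {
      'apiVersion' => 'v1',
      'spec' => { 'containers' => [{ 'name' => 'myapp', 'command' => ['puma'] }] }
    }

    spec = target_pod['spec']
    spec['restartPolicy'] = 'Never' # let the pod die for good when the command ends
    spec['containers'].each do |container|
      container['command'] = ['sleep', '86400'] # cap the pod's lifetime at 24 hours
    end

    temp_pod = {
      apiVersion: target_pod['apiVersion'],
      kind: 'Pod',
      spec: spec,
      metadata: { name: "temp-#{SecureRandom.hex(4)}" } # stand-in for Random.unique_name
    }

    # This JSON is what gets piped to `kubectl create -f -`.
    puts JSON.pretty_generate(temp_pod)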
data/lib/seira/version.rb CHANGED
@@ -1,3 +1,3 @@
  module Seira
- VERSION = "0.3.7".freeze
+ VERSION = "0.4.0".freeze
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: seira
  version: !ruby/object:Gem::Version
- version: 0.3.7
+ version: 0.4.0
  platform: ruby
  authors:
  - Scott Ringwelski
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2018-04-10 00:00:00.000000000 Z
+ date: 2018-04-13 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: highline