configgin 0.19.1 → 0.19.2

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 1bee5bef106cbdbfe6621409660761a4308781e779ecd28a6ab26244152b98cf
-  data.tar.gz: e330eedc131bef6ba4afb3803539c6e1b1424fabd70ef60a65a3abf609925577
+  metadata.gz: '0954b0d8fe5cfb4809934ffe2320c96e93e95cdd20e89f590f0714ca8b5e717c'
+  data.tar.gz: 84e9ff63af4eb3af8f33e80d740bef408b762fa163266ddb849e689f3126adcd
 SHA512:
-  metadata.gz: e5a9774eea477d8a6741850e7314bccffcefac603e530ced7526c6d20a64acade710b3984dd46664eff03612c8c2003994a6f3cd5c4f8846f85d7f1cf8752b72
-  data.tar.gz: e0578301581963b1a2db5dff6fd705104d89c3369e024a607c000e8f904925aba92f090e5f5ff0ee8d9973a19884f1a70b76a10fc8c97bbe38b44950be1db20a
+  metadata.gz: 2c89cd26245c367e7181067eed17764d9c43eb5cdd2c1ee990ae367e39dc69d82d3bf2f94755e58278903da78fc1c5c1ef45c9b4b1faf041c86d56eda81f6bc9
+  data.tar.gz: 0e1fef57272d7cce1b56101a1d088e99bf100e0adabbf929428e5ff196243be600c99c55a34eff9a192430326429e39004bfe403678e3c25d7e4e49e9ed03075
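
These digests can be checked locally. A minimal verification sketch in Ruby — not part of the gem; it assumes checksums.yaml, metadata.gz, and data.tar.gz have been extracted from the downloaded .gem archive (which is a plain tar):

    # Compare the published SHA256 digests against the extracted files.
    require 'digest'
    require 'yaml'

    checksums = YAML.safe_load(File.read('checksums.yaml'))
    %w[metadata.gz data.tar.gz].each do |name|
      expected = checksums['SHA256'][name]
      actual = Digest::SHA256.file(name).hexdigest
      puts format('%-12s %s', name, actual == expected ? 'OK' : 'MISMATCH')
    end
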
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    configgin (0.19.1)
+    configgin (0.19.2)
       bosh-template (~> 2.0)
       deep_merge (~> 1.1)
       kubeclient (~> 2.0)
data/lib/configgin.rb CHANGED
@@ -24,9 +24,8 @@ class Configgin
 
   def run
     jobs = generate_jobs(@job_configs, @templates)
-    job_digests = patch_job_metadata(jobs)
+    export_job_properties(jobs)
     render_job_templates(jobs, @job_configs)
-    restart_affected_pods expected_annotations(@job_configs, job_digests)
   end
 
   def generate_jobs(job_configs, templates)
@@ -58,73 +57,86 @@ class Configgin
     jobs
   end
 
-  # Set the exported properties and their digests, and return the digests.
-  def patch_job_metadata(jobs)
-    pod = kube_client.get_pod(@self_name, kube_namespace)
+  def render_job_templates(jobs, job_configs)
+    jobs.each do |job_name, job|
+      dns_encoder = KubeDNSEncoder.new(job.spec['links'])
+
+      job_configs[job_name]['files'].each do |infile, outfile|
+        job.generate(infile, outfile, dns_encoder)
+      end
+    end
+  end
+
+  # Write exported properties to secret and potentially restart affected pods.
+  def export_job_properties(jobs)
+    # co-located containers don't get to export properties
+    return unless instance_group == ENV["KUBERNETES_CONTAINER_NAME"]
+    # jobs don't export properties
+    return unless self_pod['metadata']['ownerReferences'][0]['kind'] == "StatefulSet"
 
+    sts = kube_client_stateful_set.get_stateful_set(instance_group, kube_namespace)
+
+    # Make sure the secret attached to the stateful set exists.
+    # XXX This should probably be done by fissile via the helm chart.
     secret = Kubeclient::Resource.new
-    secret.metadata = {}
-    # Prefixing with pod.metadata.name is purely for human convenience/debugging.
-    secret.metadata.name = "#{pod.metadata.name}-#{pod.metadata.uid}"
-    secret.metadata.namespace = kube_namespace
-
-    # Make sure the secret gets removed when the pod is deleted.
-    secret.metadata.ownerReferences = [
-      {
-        apiVersion: pod.apiVersion,
-        blockOwnerDeletion: false,
-        controller: false,
-        kind: pod.kind,
-        name: pod.metadata.name,
-        uid: pod.metadata.uid,
-      }
-    ]
+    secret.metadata = {
+      name: sts.metadata.name,
+      namespace: kube_namespace,
+      ownerReferences: [
+        {
+          apiVersion: sts.apiVersion,
+          blockOwnerDeletion: false,
+          controller: false,
+          kind: sts.kind,
+          name: sts.metadata.name,
+          uid: sts.metadata.uid,
+        }
+      ]
+    }
+    begin
+      kube_client.create_secret(secret)
+    rescue
+    end
+    secret = kube_client.get_secret(instance_group, kube_namespace)
+    secret.data ||= {}
+
+    version_tag = ENV["CONFIGGIN_VERSION_TAG"]
+    new_tag = !secret.data.has_key?(version_tag)
+    secret.data = {version_tag => ""} if new_tag # make sure old properties are deleted during upgrade
 
-    secret.data = {}
     digests = {}
     jobs.each do |name, job|
       digests[name] = property_digest(job.exported_properties)
       secret.data["skiff-exported-properties-#{name}"] = Base64.encode64(job.exported_properties.to_json)
-      secret.data["skiff-exported-digest-#{name}"] = Base64.encode64(digests[name])
-    end
 
-    # Only the main container gets to export properties; colocated sidecars don't.
-    if instance_group == ENV["KUBERNETES_CONTAINER_NAME"]
-      begin
-        kube_client.delete_secret(secret.metadata.name, kube_namespace)
-      rescue
+      encoded_digest = Base64.encode64(digests[name])
+
+      # Record initial digest values whenever the tag changes, in which case the pod startup
+      # order is already controlled by the "CONFIGGIN_IMPORT_#{role}" references to the new
+      # tags in the corresponding secrets. There is no annotation when importing this set of
+      # initial values because the helm chart doesn't include any annotations, and we don't
+      # want to trigger a pod restart by adding them.
+      if new_tag
+        secret.data["skiff-initial-digest-#{name}"] = encoded_digest
+      end
+      if secret.data["skiff-initial-digest-#{name}"] == encoded_digest
+        digests[name] = nil
       end
-      kube_client.create_secret(secret)
     end
 
-    digests
-  end
+    kube_client.update_secret(secret)
 
-  def render_job_templates(jobs, job_configs)
-    jobs.each do |job_name, job|
-      dns_encoder = KubeDNSEncoder.new(job.spec['links'])
+    return if new_tag
 
-      job_configs[job_name]['files'].each do |infile, outfile|
-        job.generate(infile, outfile, dns_encoder)
-      end
-    end
-  end
-
-  # Some pods might have depended on the properties exported by this pod; given
-  # the annotations expected on the pods (keyed by the instance group name),
-  # patch the StatefulSets such that they will be restarted.
-  def restart_affected_pods(expected_annotations)
-    expected_annotations.each_pair do |instance_group_name, digests|
+    # Some pods might have depended on the properties exported by this pod; given
+    # the annotations expected on the pods (keyed by the instance group name),
+    # patch the StatefulSets such that they will be restarted.
+    expected_annotations(@job_configs, digests).each_pair do |instance_group_name, digests|
       # Avoid restarting our own pod
       next if instance_group_name == instance_group
 
       begin
-        kube_client_stateful_set.patch_stateful_set(
-          instance_group_name,
-          { spec: { template: { metadata: { annotations: digests } } } },
-          kube_namespace
-        )
-        warn "Patched StatefulSet #{instance_group_name} for new exported digests"
+        sts = kube_client_stateful_set.get_stateful_set(instance_group_name, kube_namespace)
       rescue KubeException => e
         begin
           begin
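
The hunk above replaces the old per-pod secrets with a single secret per instance group, gated by a version tag. A condensed sketch of that flow — illustrative only, with client, group, and props_by_job as stand-ins rather than the gem's actual variables:

    require 'kubeclient'
    require 'base64'
    require 'json'

    def export_properties(client, group, namespace, props_by_job)
      # Create the secret if missing; ignore the conflict when it already exists.
      seed = Kubeclient::Resource.new
      seed.metadata = { name: group, namespace: namespace }
      begin
        client.create_secret(seed)
      rescue KubeException
      end

      secret = client.get_secret(group, namespace)
      secret.data ||= {}

      # A fresh CONFIGGIN_VERSION_TAG means an upgrade: drop all stale keys so
      # importers only ever see properties written by the current release.
      tag = ENV['CONFIGGIN_VERSION_TAG']
      new_tag = !secret.data.has_key?(tag)
      secret.data = { tag => '' } if new_tag

      props_by_job.each do |job, props|
        secret.data["skiff-exported-properties-#{job}"] = Base64.encode64(props.to_json)
      end
      client.update_secret(secret)
      new_tag
    end
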
@@ -142,6 +154,18 @@ class Configgin
           raise
         end
       end
+
+      annotations = sts.spec.template.metadata.annotations
+      digests.each_pair do |key, value|
+        annotations[key] = value
+      end
+
+      kube_client_stateful_set.merge_patch_stateful_set(
+        instance_group_name,
+        { spec: { template: { metadata: { annotations: annotations } } } },
+        kube_namespace
+      )
+      warn "Patched StatefulSet #{instance_group_name} for new exported digests"
     end
   end
 
@@ -198,8 +222,11 @@ class Configgin
     @kube_client_stateful_set ||= create_kube_client(path: '/apis/apps')
   end
 
-  def instance_group
+  def self_pod
     @pod ||= kube_client.get_pod(@self_name, kube_namespace)
-    @pod['metadata']['labels']['app.kubernetes.io/component']
+  end
+
+  def instance_group
+    self_pod['metadata']['labels']['app.kubernetes.io/component']
   end
 end
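
The restart path now does a read-modify-write of the StatefulSet's pod-template annotations instead of a blind patch. Roughly — again an illustrative sketch, reusing the kubeclient calls shown in the diff:

    # Merging into the existing annotations (rather than replacing them)
    # preserves unrelated keys, and changing the pod template is what makes
    # the controller roll the pods.
    def bump_digest_annotations(sts_client, name, namespace, digests)
      sts = sts_client.get_stateful_set(name, namespace)
      annotations = sts.spec.template.metadata.annotations.to_h.merge(digests)
      sts_client.merge_patch_stateful_set(
        name,
        { spec: { template: { metadata: { annotations: annotations } } } },
        namespace
      )
    end
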
data/lib/configgin/version.rb CHANGED
@@ -1,3 +1,3 @@
 class Configgin
-  VERSION = '0.19.1'.freeze
+  VERSION = '0.19.2'.freeze
 end
data/lib/kube_link_generator.rb CHANGED
@@ -62,19 +62,8 @@ class KubeLinkSpecs
     30.times do
       1.times do
         pods = _get_pods_for_role(role_name, sts_image)
-        good_pods = pods.select do |pod|
-          next false unless pod.status.podIP
-          begin
-            secret = client.get_secret("#{pod.metadata.name}-#{pod.metadata.uid}", namespace)
-            next true if secret.data["skiff-exported-properties-#{job}"]
-
-          rescue
-            pod.metadata.annotations["skiff-exported-properties-#{job}"]
-          end
-        end
-
+        good_pods = pods.select { |pod| pod.status.podIP }
         if options[:wait_for_all]
-          # Wait until all pods have IP addresses and properties
           break unless good_pods.length == pods.length
         end
         return good_pods unless good_pods.empty?
@@ -86,43 +75,12 @@ class KubeLinkSpecs
     end
   end
 
-  def patch_pod_with_imported_properties(role_name, job_name, digest)
-    client.patch_pod(
-      ENV['HOSTNAME'],
-      { metadata: { annotations: { :"skiff-in-props-#{role_name}-#{job_name}" => digest } } },
-      namespace
-    )
-  end
-
-  def get_exported_properties(role_name, pod, job_name)
-    # Exported properties are stored in a secret linked to the pod by naming convention.
-    begin
-      secret = client.get_secret("#{pod.metadata.name}-#{pod.metadata.uid}", namespace)
-    rescue
-    end
-
-    if !secret.nil?
-      digest = secret.data["skiff-exported-digest-#{job_name}"]
-      # digest not being set only happens during the spec tests???
-      if digest
-        # Copy the digest over, so that if the source role changes we can be restarted.
-        patch_pod_with_imported_properties(role_name, job_name, Base64.decode64(digest))
-      end
-      JSON.parse(Base64.decode64(secret.data["skiff-exported-properties-#{job_name}"]))
-
-    # Older implementation stored exported properties in annotations (one per job).
-    elsif pod.metadata.annotations["skiff-exported-properties-#{job_name}"]
-      # digest not being set only happens during the spec tests???
-      if pod.metadata.annotations["skiff-exported-digest-#{job_name}"]
-        # Copy the digest over, so that if the source role changes we can be restarted.
-        digest = pod.metadata.annotations["skiff-exported-digest-#{job_name}"]
-        patch_pod_with_imported_properties(role_name, job_name, digest)
-      end
-      JSON.parse(pod.metadata.annotations["skiff-exported-properties-#{job_name}"])
-
-    else
-      {}
-    end
+  def get_exported_properties(role_name, job_name)
+    # Containers are not starting until all the properties they want to import already exist.
+    # This is done using the CONFIGGIN_IMPORT_ROLE environment variables referencing the version
+    # tag in the corresponding secret.
+    secret = client.get_secret(role_name, namespace)
+    JSON.parse(Base64.decode64(secret.data["skiff-exported-properties-#{job_name}"]))
   end
 
   def get_pod_instance_info(role_name, pod, job, pods_per_image)
@@ -134,7 +92,7 @@
       'id' => pod.metadata.name,
       'az' => pod.metadata.annotations['failure-domain.beta.kubernetes.io/zone'] || 'az0',
       'address' => "#{pod.metadata.name}.#{pod.spec.subdomain}.#{ENV['KUBERNETES_NAMESPACE']}.svc.#{ENV['KUBERNETES_CLUSTER_DOMAIN']}",
-      'properties' => get_exported_properties(role_name, pod, job),
+      'properties' => get_exported_properties(role_name, job),
       'bootstrap' => pods_per_image[pod.metadata.uid] < 2
     }
   end
@@ -164,9 +122,10 @@
       'name' => svc.metadata.name,
       'index' => 0, # Completely made up index; there is only ever one service
       'id' => svc.metadata.name,
+      # XXX bogus, but what can we do?
       'az' => pod.metadata.annotations['failure-domain.beta.kubernetes.io/zone'] || 'az0',
       'address' => svc.spec.clusterIP,
-      'properties' => get_exported_properties(role_name, pod, job),
+      'properties' => get_exported_properties(role_name, job),
       'bootstrap' => true
     }
   end
@@ -182,7 +141,8 @@
       'id' => ss.metadata.name,
       'az' => pod.metadata.annotations['failure-domain.beta.kubernetes.io/zone'] || 'az0',
       'address' => "#{ss.metadata.name}-#{i}.#{ss.spec.serviceName}",
-      'properties' => get_exported_properties(role_name, pod, job),
+      'properties' => get_exported_properties(role_name, job),
+      # XXX not actually correct during updates
       'bootstrap' => i.zero?
     }
   end
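
On the import side, KubeLinkSpecs now reads a single role-named secret instead of per-pod secrets or annotations. A minimal sketch of the lookup, assuming a configured Kubeclient::Client as client:

    # Startup ordering is enforced elsewhere: per the diff's comments, each
    # consumer references the exporter's version tag via CONFIGGIN_IMPORT_<role>
    # environment variables, so the secret exists before this code runs.
    require 'base64'
    require 'json'

    def exported_properties(client, role_name, job_name, namespace)
      secret = client.get_secret(role_name, namespace)
      JSON.parse(Base64.decode64(secret.data["skiff-exported-properties-#{job_name}"]))
    end
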
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: configgin
 version: !ruby/object:Gem::Version
-  version: 0.19.1
+  version: 0.19.2
 platform: ruby
 authors:
 - SUSE
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2019-10-22 00:00:00.000000000 Z
+date: 2019-11-21 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler