configgin 0.15.2 → 0.16.0
- checksums.yaml +5 -5
- data/Gemfile.lock +1 -1
- data/bin/configgin +6 -7
- data/lib/configgin/version.rb +1 -1
- data/lib/kube_link_generator.rb +19 -23
- metadata +3 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
-
-  metadata.gz:
-  data.tar.gz:
+SHA1:
+  metadata.gz: 43e19eea72947ae70b880640570aea9d8cca1ee0
+  data.tar.gz: cb47f254ee03495870310f5369d7b19adb0df853
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 66e6ec7d3c5c40e9caa6fe64c91bb588d0ed18912433c811e45f1d9ffc13667db6ba9bb9e5aac86440740701d3f1849105b58108caf49d62f9b1671be92c9174
+  data.tar.gz: a53834220ffa572251ad3ed24674eb2620101720a3feadc7410d406574ded1ddf18ac1e94cf41e584ba82783c54ce76cec5fc1c1b2505d39583b25d180113b30
data/Gemfile.lock
CHANGED
data/bin/configgin
CHANGED
@@ -65,13 +65,12 @@ job_configs.each do |job, job_config|
   jobs[job] = Job.new(bosh_spec, kube_namespace, kube_client, kube_client_stateful_set)
 end
 
-jobs.
-
-
-
-
-
-end
+exported_properties = Hash[jobs.map { |name, job| [name, job.exported_properties] }]
+kube_client.patch_pod(
+  ENV['HOSTNAME'],
+  { metadata: { annotations: { :'skiff-exported-properties' => exported_properties.to_json } } },
+  kube_namespace
+)
 
 jobs.each do |job_name, job|
   dns_encoder = KubeDNSEncoder.new(job.spec['links'])
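For reference, a standalone sketch (not taken from the gem) of what these new bin/configgin lines produce: every job's exported properties are collapsed into one JSON document and written as a single skiff-exported-properties pod annotation via a strategic-merge patch. The job names, property values, and the FakeJob stand-in below are invented for illustration; only the shape of the patch body mirrors the diff.

#!/usr/bin/env ruby
# Standalone sketch: build the single 'skiff-exported-properties' annotation
# payload the way the new bin/configgin code does, and print the patch body
# that kube_client.patch_pod would send for this pod (ENV['HOSTNAME']).
require 'json'

# Hypothetical stand-in for configgin's Job objects; only #exported_properties
# matters here, and the property values are made up.
FakeJob = Struct.new(:exported_properties)
jobs = {
  'nats'   => FakeJob.new('nats'   => { 'user' => 'nats', 'port' => 4222 }),
  'router' => FakeJob.new('router' => { 'port' => 80 })
}

# Same shape as the diff: { job_name => exported_properties_hash }, serialized once.
exported_properties = Hash[jobs.map { |name, job| [name, job.exported_properties] }]

# Strategic-merge patch body; the real code passes this to patch_pod together
# with ENV['HOSTNAME'] and the namespace.
patch = { metadata: { annotations: { :'skiff-exported-properties' => exported_properties.to_json } } }

puts JSON.pretty_generate(patch)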
data/lib/configgin/version.rb
CHANGED
data/lib/kube_link_generator.rb
CHANGED
@@ -37,7 +37,7 @@ class KubeLinkSpecs
     @client.get_pods(namespace: @namespace, label_selector: "skiff-role-name=#{role_name}")
   end
 
-  def get_pods_for_role(role_name, wait_for_ip
+  def get_pods_for_role(role_name, wait_for_ip)
     loop do
       # The 30.times loop exists to print out status messages
       30.times do
@@ -46,34 +46,30 @@ class KubeLinkSpecs
           if wait_for_ip
             # Wait until all pods have IP addresses and properties
             break unless pods.all? { |pod| pod.status.podIP }
-            break unless pods.all? { |pod| pod.metadata.annotations[
+            break unless pods.all? { |pod| pod.metadata.annotations['skiff-exported-properties'] }
           else
             # We just need one pod with exported properties
             pods.select! { |pod| pod.status.podIP }
-            pods.select! { |pod| pod.metadata.annotations[
+            pods.select! { |pod| pod.metadata.annotations['skiff-exported-properties'] }
           end
           return pods unless pods.empty?
         end
         sleep 1
       end
-      $stdout.puts "Waiting for pods for role #{role_name}
+      $stdout.puts "Waiting for pods for role #{role_name} (at #{Time.now})..."
     end
   end
 
-  def get_exported_properties(pod, job)
-    exported_properties = pod.metadata.annotations["skiff-exported-properties-#{job}"]
-    exported_properties.nil? ? {} : JSON.parse(exported_properties)
-  end
-
   def get_pod_instance_info(pod, job, pods_per_image)
     index = pod_index(pod.metadata.name)
+    properties = JSON.parse(pod.metadata.annotations['skiff-exported-properties'])
     {
       'name' => pod.metadata.name,
       'index' => index,
       'id' => pod.metadata.name,
       'az' => pod.metadata.annotations['failure-domain.beta.kubernetes.io/zone'] || 'az0',
       'address' => pod.status.podIP,
-      'properties' =>
+      'properties' => properties.fetch(job, {}),
       'bootstrap' => pods_per_image[pod.metadata.uid] < 2
     }
   end
@@ -84,11 +80,9 @@ class KubeLinkSpecs
     sets = Hash.new(0)
     keys = {}
     pods.each do |pod|
-
-
-
-        keys[pod.metadata.uid] = key
-      end
+      key = pod.status.containerStatuses.map(&:imageID).sort.join("\n")
+      sets[key] += 1
+      keys[pod.metadata.uid] = key
     end
     pods.each do |pod|
       result[pod.metadata.uid] = sets[keys[pod.metadata.uid]]
@@ -98,21 +92,23 @@ class KubeLinkSpecs
 
   def get_svc_instance_info(role_name, job)
     svc = @client.get_service(role_name, @namespace)
-    pod = get_pods_for_role(role_name, false
+    pod = get_pods_for_role(role_name, false).first
+    properties = JSON.parse(pod.metadata.annotations['skiff-exported-properties'])
     {
       'name' => svc.metadata.name,
      'index' => 0, # Completely made up index; there is only ever one service
       'id' => svc.metadata.name,
       'az' => pod.metadata.annotations['failure-domain.beta.kubernetes.io/zone'] || 'az0',
       'address' => svc.spec.clusterIP,
-      'properties' =>
+      'properties' => properties.fetch(job, {}),
       'bootstrap' => true
     }
   end
 
   def get_statefulset_instance_info(role_name, job)
     ss = @client_stateful_set.get_stateful_set(role_name, @namespace)
-    pod = get_pods_for_role(role_name, false
+    pod = get_pods_for_role(role_name, false).first
+    properties = JSON.parse(pod.metadata.annotations['skiff-exported-properties'])
 
     Array.new(ss.spec.replicas) do |i|
       {
@@ -121,7 +117,7 @@ class KubeLinkSpecs
         'id' => ss.metadata.name,
         'az' => pod.metadata.annotations['failure-domain.beta.kubernetes.io/zone'] || 'az0',
         'address' => "#{ss.metadata.name}-#{i}.#{ss.spec.serviceName}",
-        'properties' =>
+        'properties' => properties.fetch(job, {}),
         'bootstrap' => i.zero?
       }
     end
@@ -146,7 +142,7 @@ class KubeLinkSpecs
 
       if provider['role'] == this_name
        $stderr.puts "Resolving link #{key} via self provider #{provider}"
-        pods = get_pods_for_role(provider['role'], true
+        pods = get_pods_for_role(provider['role'], true)
        pods_per_image = get_pods_per_image(pods)
        instances = pods.map { |p| get_pod_instance_info(p, provider['job'], pods_per_image) }
      elsif service? provider['role']
@@ -160,12 +156,12 @@ class KubeLinkSpecs
       end
 
       @links[key] = {
-        'address' => "#{provider['role']}.#{ENV['
+        'address' => "#{provider['role']}.#{ENV['KUBERNETES_NAMESPACE']}.svc.#{ENV['KUBERNETES_CLUSTER_DOMAIN']}",
         'instance_group' => '',
         'default_network' => '',
         'deployment_name' => @namespace,
-        'domain' => ENV['
-        'root_domain' => ENV['
+        'domain' => "#{ENV['KUBERNETES_NAMESPACE']}.svc.#{ENV['KUBERNETES_CLUSTER_DOMAIN']}",
+        'root_domain' => "#{ENV['KUBERNETES_NAMESPACE']}.svc.#{ENV['KUBERNETES_CLUSTER_DOMAIN']}",
         'instances' => instances,
         'properties' => instances.first['properties']
       }
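The read side of the same change, sketched outside the gem: each instance-info helper now parses the one skiff-exported-properties annotation and selects a job's slice with fetch(job, {}), falling back to an empty hash when that job exported nothing. The annotation value below is fabricated for illustration.

# Standalone sketch of the lookup performed in get_pod_instance_info,
# get_svc_instance_info and get_statefulset_instance_info. A real pod would
# carry this JSON string in metadata.annotations['skiff-exported-properties'].
require 'json'

annotation = {
  'nats'   => { 'nats' => { 'port' => 4222 } },
  'router' => { 'router' => { 'port' => 80 } }
}.to_json

properties = JSON.parse(annotation)

p properties.fetch('nats', {})    # => {"nats"=>{"port"=>4222}}
p properties.fetch('doppler', {}) # => {} for a job that exported nothing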
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: configgin
 version: !ruby/object:Gem::Version
-  version: 0.15.2
+  version: 0.16.0
 platform: ruby
 authors:
 - SUSE
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-
+date: 2018-06-13 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -177,7 +177,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     version: '0'
 requirements: []
 rubyforge_project:
-rubygems_version: 2.
+rubygems_version: 2.6.13
 signing_key:
 specification_version: 4
 summary: A simple cli app in Ruby to generate configurations using BOSH ERB templates