kube_cluster 0.2.1 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Gemfile.lock +4 -4
- data/examples/01-basic-redis-pod/manifest.rb +1 -1
- data/examples/02-manifest-with-middleware/manifest.rb +37 -0
- data/examples/02-manifest-with-middleware/middleware/labels.rb +4 -0
- data/examples/02-manifest-with-middleware/middleware/namespace.rb +4 -0
- data/examples/02-manifest-with-middleware/templates/config_map.rb +13 -0
- data/examples/02-manifest-with-middleware/templates/deployment.rb +59 -0
- data/examples/02-manifest-with-middleware/templates/horizontal_pod_autoscaler.rb +30 -0
- data/examples/02-manifest-with-middleware/templates/ingress.rb +38 -0
- data/examples/02-manifest-with-middleware/templates/service.rb +12 -0
- data/examples/{version2 → 03-app-with-database}/demo.rb +2 -2
- data/examples/{version2 → 03-app-with-database}/postgresql.rb +4 -4
- data/examples/{version2 → 03-app-with-database}/ruby_on_rails.rb +1 -1
- data/lib/kube/cluster/manifest.rb +13 -64
- data/lib/kube/cluster/middleware/annotations.rb +32 -0
- data/lib/kube/cluster/middleware/hpa_for_deployment.rb +111 -0
- data/lib/kube/cluster/{manifest/middleware → middleware}/ingress_for_service.rb +36 -34
- data/lib/kube/cluster/middleware/labels.rb +59 -0
- data/lib/kube/cluster/middleware/namespace.rb +31 -0
- data/lib/kube/cluster/middleware/pod_anti_affinity.rb +61 -0
- data/lib/kube/cluster/middleware/resource_preset.rb +64 -0
- data/lib/kube/cluster/middleware/security_context.rb +84 -0
- data/lib/kube/cluster/middleware/service_for_deployment.rb +71 -0
- data/lib/kube/cluster/middleware/stack.rb +43 -0
- data/lib/kube/cluster/middleware.rb +69 -0
- data/lib/kube/cluster/resource.rb +78 -0
- data/lib/kube/cluster/version.rb +1 -1
- data/lib/kube/cluster.rb +21 -0
- metadata +25 -19
- data/examples/database/manifest.rb +0 -238
- data/examples/web-app/manifest.rb +0 -215
- data/lib/kube/cluster/manifest/middleware/annotations.rb +0 -32
- data/lib/kube/cluster/manifest/middleware/hpa_for_deployment.rb +0 -109
- data/lib/kube/cluster/manifest/middleware/labels.rb +0 -59
- data/lib/kube/cluster/manifest/middleware/namespace.rb +0 -31
- data/lib/kube/cluster/manifest/middleware/pod_anti_affinity.rb +0 -61
- data/lib/kube/cluster/manifest/middleware/resource_preset.rb +0 -64
- data/lib/kube/cluster/manifest/middleware/security_context.rb +0 -84
- data/lib/kube/cluster/manifest/middleware/service_for_deployment.rb +0 -69
- data/lib/kube/cluster/manifest/middleware.rb +0 -178
- data/lib/kube/cluster/manifest/stack.rb +0 -56
- /data/examples/{version2 → 03-app-with-database}/helpers.rb +0 -0
- /data/examples/{version2 → 03-app-with-database}/my_app.rb +0 -0
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA256:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: f35a6fb5b6ece5ffc32651a5ad104c896bc8cb80810c80ecb560a22fe0a59eca
|
|
4
|
+
data.tar.gz: df13e8af081ff1bba21254570f12eef80f82fe2ab47804877e76c0f360c9212c
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: cbfd8728fd22e79cc2429dc503cbba4f931da5d2f36e99d210b189863bdaa9fcf21965b13b1af231029d973dfafc03f1d44dba48f0f51eb07ba3d6597d8dfea1
|
|
7
|
+
data.tar.gz: 4dd4cec8ee94d15cce4430e6ee1b6a80a5db1e5aa97995b4aa58c3d5180dc9ac7829dbb4d36e9ba2b55c17ec025de097d0622d808dc24ba9e87ee5fadf4a2478
|
data/Gemfile.lock
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
PATH
|
|
2
2
|
remote: .
|
|
3
3
|
specs:
|
|
4
|
-
kube_cluster (0.2.
|
|
4
|
+
kube_cluster (0.2.1)
|
|
5
5
|
kube_kit (> 0)
|
|
6
6
|
kube_kubectl (~> 2.0.0)
|
|
7
7
|
kube_schema (~> 1.2.0)
|
|
@@ -24,20 +24,20 @@ GEM
|
|
|
24
24
|
prism (>= 1.3.0)
|
|
25
25
|
rdoc (>= 4.0.0)
|
|
26
26
|
reline (>= 0.4.2)
|
|
27
|
-
json (2.19.
|
|
27
|
+
json (2.19.4)
|
|
28
28
|
json_schemer (2.5.0)
|
|
29
29
|
bigdecimal
|
|
30
30
|
hana (~> 1.3)
|
|
31
31
|
regexp_parser (~> 2.0)
|
|
32
32
|
simpleidn (~> 0.2)
|
|
33
33
|
kube_kit (0.2.0)
|
|
34
|
-
kube_kubectl (2.0.
|
|
34
|
+
kube_kubectl (2.0.2)
|
|
35
35
|
debug (~> 1.11)
|
|
36
36
|
json_schemer (~> 2.5)
|
|
37
37
|
rubyshell (~> 1.5)
|
|
38
38
|
shellwords (~> 0.2.2)
|
|
39
39
|
string_builder (~> 1.2.0)
|
|
40
|
-
kube_schema (1.2.
|
|
40
|
+
kube_schema (1.2.3)
|
|
41
41
|
black_hole_struct (~> 0.1)
|
|
42
42
|
json_schemer (~> 2.5)
|
|
43
43
|
rubyshell (~> 1.5)
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
require_relative 'templates/config_map'
|
|
2
|
+
require_relative 'templates/deployment'
|
|
3
|
+
require_relative 'templates/ingress'
|
|
4
|
+
require_relative 'templates/service'
|
|
5
|
+
require_relative 'templates/horizontal_pod_autoscaler'
|
|
6
|
+
|
|
7
|
+
require_relative 'middleware/labels'
|
|
8
|
+
require_relative 'middleware/namespace'
|
|
9
|
+
|
|
10
|
+
class MyApp < Kube::Schema::Manifest
|
|
11
|
+
stack do
|
|
12
|
+
use Middleware::Namespace
|
|
13
|
+
use Middleware::Labels
|
|
14
|
+
end
|
|
15
|
+
end
|
|
16
|
+
|
|
17
|
+
puts MyApp.new(
|
|
18
|
+
Templates::ConfigMap.new {
|
|
19
|
+
# no overrides today
|
|
20
|
+
},
|
|
21
|
+
|
|
22
|
+
Templates::Deployment.new {
|
|
23
|
+
# no overrides today
|
|
24
|
+
},
|
|
25
|
+
|
|
26
|
+
Templates::Ingress.new {
|
|
27
|
+
# no overrides today
|
|
28
|
+
},
|
|
29
|
+
|
|
30
|
+
Templates::Service.new {
|
|
31
|
+
# no overrides today
|
|
32
|
+
},
|
|
33
|
+
|
|
34
|
+
Templates::HorizontalPodAutoscaler.new {
|
|
35
|
+
# no overrides today
|
|
36
|
+
},
|
|
37
|
+
).to_yaml
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
class Deployment < Kube::Cluster["Deployment"]
|
|
2
|
+
def initialize(namespace:)
|
|
3
|
+
build {
|
|
4
|
+
metadata.name = namespace
|
|
5
|
+
|
|
6
|
+
spec.replicas = 3
|
|
7
|
+
spec.selector.matchLabels = MATCH_LABELS
|
|
8
|
+
|
|
9
|
+
spec.template.metadata.labels = STANDARD_LABELS
|
|
10
|
+
spec.template.metadata.annotations = {
|
|
11
|
+
# Checksum pattern from _utils.tpl -- triggers rolling restart on config change
|
|
12
|
+
"checksum/config": "{{ sha256sum of configmap data }}",
|
|
13
|
+
}
|
|
14
|
+
|
|
15
|
+
spec.template.spec.containers = [
|
|
16
|
+
{
|
|
17
|
+
name: APP_NAME,
|
|
18
|
+
image: IMAGE,
|
|
19
|
+
ports: [{ name: "http", containerPort: 3000, protocol: "TCP" }],
|
|
20
|
+
resources: RESOURCES,
|
|
21
|
+
env: [
|
|
22
|
+
{ name: "PORT", value: "3000" },
|
|
23
|
+
],
|
|
24
|
+
envFrom: [
|
|
25
|
+
{ configMapRef: { name: "#{FULLNAME}-config" } },
|
|
26
|
+
],
|
|
27
|
+
livenessProbe: {
|
|
28
|
+
httpGet: { path: "/healthz", port: "http" },
|
|
29
|
+
initialDelaySeconds: 15,
|
|
30
|
+
periodSeconds: 10,
|
|
31
|
+
},
|
|
32
|
+
readinessProbe: {
|
|
33
|
+
httpGet: { path: "/readyz", port: "http" },
|
|
34
|
+
initialDelaySeconds: 5,
|
|
35
|
+
periodSeconds: 5,
|
|
36
|
+
},
|
|
37
|
+
},
|
|
38
|
+
]
|
|
39
|
+
|
|
40
|
+
# Pod anti-affinity (from _affinities.tpl)
|
|
41
|
+
# Soft anti-affinity: prefer spreading pods across nodes but don't enforce it
|
|
42
|
+
spec.template.spec.affinity = {
|
|
43
|
+
podAntiAffinity: {
|
|
44
|
+
preferredDuringSchedulingIgnoredDuringExecution: [
|
|
45
|
+
{
|
|
46
|
+
weight: 1,
|
|
47
|
+
podAffinityTerm: {
|
|
48
|
+
labelSelector: {
|
|
49
|
+
matchLabels: MATCH_LABELS,
|
|
50
|
+
},
|
|
51
|
+
topologyKey: "kubernetes.io/hostname",
|
|
52
|
+
},
|
|
53
|
+
},
|
|
54
|
+
],
|
|
55
|
+
},
|
|
56
|
+
}
|
|
57
|
+
}
|
|
58
|
+
end
|
|
59
|
+
end
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
class HorizontalPodAutoscaler < Kube::Cluster["HorizontalPodAutoscaler"]
|
|
2
|
+
def initialize(namespace:)
|
|
3
|
+
build {
|
|
4
|
+
metadata.name = namespace
|
|
5
|
+
|
|
6
|
+
spec.scaleTargetRef.apiVersion = "apps/v1"
|
|
7
|
+
spec.scaleTargetRef.kind = "Deployment"
|
|
8
|
+
spec.scaleTargetRef.name = namespace
|
|
9
|
+
|
|
10
|
+
spec.minReplicas = 3
|
|
11
|
+
spec.maxReplicas = 10
|
|
12
|
+
spec.metrics = [
|
|
13
|
+
{
|
|
14
|
+
type: "Resource",
|
|
15
|
+
resource: {
|
|
16
|
+
name: "cpu",
|
|
17
|
+
target: { type: "Utilization", averageUtilization: 75 },
|
|
18
|
+
},
|
|
19
|
+
},
|
|
20
|
+
{
|
|
21
|
+
type: "Resource",
|
|
22
|
+
resource: {
|
|
23
|
+
name: "memory",
|
|
24
|
+
target: { type: "Utilization", averageUtilization: 80 },
|
|
25
|
+
},
|
|
26
|
+
},
|
|
27
|
+
]
|
|
28
|
+
}
|
|
29
|
+
end
|
|
30
|
+
end
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
class Ingress < Kube::Cluster["Ingress"]
|
|
2
|
+
def initialize(namespace:)
|
|
3
|
+
build {
|
|
4
|
+
metadata.name = namespace
|
|
5
|
+
metadata.annotations = {
|
|
6
|
+
"cert-manager.io/cluster-issuer": "letsencrypt-prod",
|
|
7
|
+
"nginx.ingress.kubernetes.io/ssl-redirect": "true",
|
|
8
|
+
}
|
|
9
|
+
|
|
10
|
+
spec.ingressClassName = "nginx"
|
|
11
|
+
spec.tls = [
|
|
12
|
+
{
|
|
13
|
+
hosts: ["app.example.com"],
|
|
14
|
+
secretName: "#{namespace}-tls",
|
|
15
|
+
},
|
|
16
|
+
]
|
|
17
|
+
spec.rules = [
|
|
18
|
+
{
|
|
19
|
+
host: "app.example.com",
|
|
20
|
+
http: {
|
|
21
|
+
paths: [
|
|
22
|
+
{
|
|
23
|
+
path: "/",
|
|
24
|
+
pathType: "Prefix",
|
|
25
|
+
backend: {
|
|
26
|
+
service: {
|
|
27
|
+
name: FULLNAME,
|
|
28
|
+
port: { name: "http" },
|
|
29
|
+
},
|
|
30
|
+
},
|
|
31
|
+
},
|
|
32
|
+
],
|
|
33
|
+
},
|
|
34
|
+
},
|
|
35
|
+
]
|
|
36
|
+
}
|
|
37
|
+
end
|
|
38
|
+
end
|
|
@@ -44,12 +44,12 @@ app = MyApp.new("example.com", size: :small) do |m|
|
|
|
44
44
|
# Middleware injects: resource limits, security context, anti-affinity, labels
|
|
45
45
|
|
|
46
46
|
[
|
|
47
|
-
Kube::
|
|
47
|
+
Kube::Cluster["Namespace"].new {
|
|
48
48
|
metadata.name = ns
|
|
49
49
|
metadata.labels = labels
|
|
50
50
|
},
|
|
51
51
|
|
|
52
|
-
Kube::
|
|
52
|
+
Kube::Cluster["ConfigMap"].new {
|
|
53
53
|
metadata.name = "#{name}-config"
|
|
54
54
|
metadata.namespace = ns
|
|
55
55
|
metadata.labels = labels
|
|
@@ -10,7 +10,7 @@ class Postgresql < Kube::Cluster::Manifest
|
|
|
10
10
|
self << Secret.new
|
|
11
11
|
end
|
|
12
12
|
|
|
13
|
-
class StatefulSet < Kube::
|
|
13
|
+
class StatefulSet < Kube::Cluster["StatefulSet"]
|
|
14
14
|
metadata.name = db_name
|
|
15
15
|
metadata.namespace = db_ns
|
|
16
16
|
metadata.labels = db_labels
|
|
@@ -55,12 +55,12 @@ class Postgresql < Kube::Cluster::Manifest
|
|
|
55
55
|
]
|
|
56
56
|
end
|
|
57
57
|
|
|
58
|
-
class Namespace < Kube::
|
|
58
|
+
class Namespace < Kube::Cluster["Namespace"]
|
|
59
59
|
metadata.name = db_ns
|
|
60
60
|
metadata.labels = db_labels.reject { |k, _| k == :"app.kubernetes.io/component" }
|
|
61
61
|
end
|
|
62
62
|
|
|
63
|
-
class Secret < Kube::
|
|
63
|
+
class Secret < Kube::Cluster["Secret"]
|
|
64
64
|
metadata.name = db_name
|
|
65
65
|
metadata.namespace = db_ns
|
|
66
66
|
metadata.labels = db_labels
|
|
@@ -70,7 +70,7 @@ class Postgresql < Kube::Cluster::Manifest
|
|
|
70
70
|
|
|
71
71
|
# Headless service for StatefulSet DNS — explicit because the
|
|
72
72
|
# middleware-generated Service is a regular ClusterIP service.
|
|
73
|
-
class Service < Kube::
|
|
73
|
+
class Service < Kube::Cluster["Service"]
|
|
74
74
|
metadata.name = "#{db_name}-headless"
|
|
75
75
|
metadata.namespace = db_ns
|
|
76
76
|
metadata.labels = db_labels
|
|
@@ -1,76 +1,25 @@
|
|
|
1
1
|
# frozen_string_literal: true
|
|
2
2
|
|
|
3
|
-
require_relative "manifest/stack"
|
|
4
|
-
require_relative "manifest/middleware"
|
|
5
|
-
require_relative "manifest/middleware/namespace"
|
|
6
|
-
require_relative "manifest/middleware/labels"
|
|
7
|
-
require_relative "manifest/middleware/annotations"
|
|
8
|
-
require_relative "manifest/middleware/resource_preset"
|
|
9
|
-
require_relative "manifest/middleware/security_context"
|
|
10
|
-
require_relative "manifest/middleware/pod_anti_affinity"
|
|
11
|
-
require_relative "manifest/middleware/service_for_deployment"
|
|
12
|
-
require_relative "manifest/middleware/ingress_for_service"
|
|
13
|
-
require_relative "manifest/middleware/hpa_for_deployment"
|
|
14
|
-
|
|
15
3
|
module Kube
|
|
16
4
|
module Cluster
|
|
17
|
-
# A
|
|
18
|
-
#
|
|
19
|
-
#
|
|
5
|
+
# A flat, ordered collection of Kubernetes resources.
|
|
6
|
+
#
|
|
7
|
+
# Manifest is a pure resource collection. Middleware is applied
|
|
8
|
+
# separately via Kube::Cluster::Middleware::Stack.
|
|
20
9
|
#
|
|
21
|
-
#
|
|
22
|
-
#
|
|
23
|
-
#
|
|
24
|
-
#
|
|
25
|
-
#
|
|
26
|
-
#
|
|
10
|
+
# manifest = Kube::Cluster::Manifest.new
|
|
11
|
+
# manifest << Kube::Cluster["Deployment"].new { ... }
|
|
12
|
+
#
|
|
13
|
+
# stack = Kube::Cluster::Middleware::Stack.new do
|
|
14
|
+
# use Middleware::Namespace, "production"
|
|
15
|
+
# use Middleware::Labels, app: "web-app"
|
|
27
16
|
# end
|
|
28
17
|
#
|
|
29
|
-
#
|
|
30
|
-
#
|
|
31
|
-
# app.to_yaml # resources have been transformed by the stack
|
|
18
|
+
# stack.call(manifest)
|
|
19
|
+
# manifest.to_yaml
|
|
32
20
|
#
|
|
33
21
|
class Manifest < Kube::Schema::Manifest
|
|
34
|
-
|
|
35
|
-
#
|
|
36
|
-
# stack do
|
|
37
|
-
# use Middleware::ResourcePreset
|
|
38
|
-
# use Middleware::SecurityContext
|
|
39
|
-
# end
|
|
40
|
-
#
|
|
41
|
-
def self.stack(&block)
|
|
42
|
-
@stack = Stack.new(&block)
|
|
43
|
-
end
|
|
44
|
-
|
|
45
|
-
# Enumerate resources after passing them through the middleware
|
|
46
|
-
# stack. The entire manifest is passed to the stack so that
|
|
47
|
-
# generative middleware can introduce new resources that
|
|
48
|
-
# subsequent stages will see and process.
|
|
49
|
-
#
|
|
50
|
-
# Every method that reads the manifest (to_yaml, to_a, map,
|
|
51
|
-
# select, etc.) goes through here.
|
|
52
|
-
def each(&block)
|
|
53
|
-
return enum_for(:each) unless block
|
|
54
|
-
|
|
55
|
-
stack = self.class.instance_variable_get(:@stack)
|
|
56
|
-
if stack
|
|
57
|
-
stack.call(@resources).each(&block)
|
|
58
|
-
else
|
|
59
|
-
@resources.each(&block)
|
|
60
|
-
end
|
|
61
|
-
end
|
|
62
|
-
|
|
63
|
-
# Override to_yaml so it renders through the middleware stack.
|
|
64
|
-
# The parent class accesses @resources directly, bypassing each.
|
|
65
|
-
def to_yaml
|
|
66
|
-
map { |r| r.to_yaml }.join("")
|
|
67
|
-
end
|
|
68
|
-
|
|
69
|
-
# Override to_a so it returns middleware-processed resources.
|
|
70
|
-
# The parent class returns @resources.dup directly.
|
|
71
|
-
def to_a
|
|
72
|
-
map(&:itself)
|
|
73
|
-
end
|
|
22
|
+
attr_reader :resources
|
|
74
23
|
end
|
|
75
24
|
end
|
|
76
25
|
end
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Kube
|
|
4
|
+
module Cluster
|
|
5
|
+
class Middleware
|
|
6
|
+
# Merges annotations into +metadata.annotations+ on every resource.
|
|
7
|
+
# Existing annotations are preserved; the supplied annotations act
|
|
8
|
+
# as defaults that can be overridden per-resource.
|
|
9
|
+
#
|
|
10
|
+
# stack do
|
|
11
|
+
# use Middleware::Annotations,
|
|
12
|
+
# "prometheus.io/scrape": "true",
|
|
13
|
+
# "prometheus.io/port": "9090"
|
|
14
|
+
# end
|
|
15
|
+
#
|
|
16
|
+
class Annotations < Middleware
|
|
17
|
+
def initialize(**annotations)
|
|
18
|
+
@annotations = annotations.transform_keys(&:to_sym).transform_values(&:to_s)
|
|
19
|
+
end
|
|
20
|
+
|
|
21
|
+
def call(manifest)
|
|
22
|
+
manifest.resources.map! do |resource|
|
|
23
|
+
h = resource.to_h
|
|
24
|
+
h[:metadata] ||= {}
|
|
25
|
+
h[:metadata][:annotations] = @annotations.merge(h[:metadata][:annotations] || {})
|
|
26
|
+
resource.rebuild(h)
|
|
27
|
+
end
|
|
28
|
+
end
|
|
29
|
+
end
|
|
30
|
+
end
|
|
31
|
+
end
|
|
32
|
+
end
|
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Kube
|
|
4
|
+
module Cluster
|
|
5
|
+
class Middleware
|
|
6
|
+
# Generates a HorizontalPodAutoscaler for every pod-bearing
|
|
7
|
+
# resource that carries the +app.kubernetes.io/autoscale+ label.
|
|
8
|
+
#
|
|
9
|
+
# The label value encodes the min and max replicas as "min-max":
|
|
10
|
+
#
|
|
11
|
+
# metadata.labels = { "app.kubernetes.io/autoscale": "1-5" }
|
|
12
|
+
#
|
|
13
|
+
# Options:
|
|
14
|
+
# cpu: — target CPU utilization percentage (default: 75)
|
|
15
|
+
# memory: — target memory utilization percentage (default: 80)
|
|
16
|
+
#
|
|
17
|
+
# stack do
|
|
18
|
+
# use Middleware::HPAForDeployment
|
|
19
|
+
# use Middleware::HPAForDeployment, cpu: 60, memory: 70
|
|
20
|
+
# end
|
|
21
|
+
#
|
|
22
|
+
class HPAForDeployment < Middleware
|
|
23
|
+
LABEL = :"app.kubernetes.io/autoscale"
|
|
24
|
+
|
|
25
|
+
def initialize(cpu: 75, memory: 80)
|
|
26
|
+
@cpu = cpu
|
|
27
|
+
@memory = memory
|
|
28
|
+
end
|
|
29
|
+
|
|
30
|
+
def call(manifest)
|
|
31
|
+
generated = []
|
|
32
|
+
|
|
33
|
+
manifest.resources.each do |resource|
|
|
34
|
+
next unless resource.pod_bearing?
|
|
35
|
+
|
|
36
|
+
value = resource.label(LABEL)
|
|
37
|
+
next unless value
|
|
38
|
+
|
|
39
|
+
min, max = parse_range(value)
|
|
40
|
+
|
|
41
|
+
h = resource.to_h
|
|
42
|
+
name = h.dig(:metadata, :name)
|
|
43
|
+
namespace = h.dig(:metadata, :namespace)
|
|
44
|
+
labels = h.dig(:metadata, :labels) || {}
|
|
45
|
+
api_version = h[:apiVersion] || "apps/v1"
|
|
46
|
+
resource_kind = resource.kind
|
|
47
|
+
|
|
48
|
+
# Capture ivars as locals — the block runs via instance_exec
|
|
49
|
+
# on a BlackHoleStruct, so @ivars would resolve on the BHS.
|
|
50
|
+
cpu_target = @cpu
|
|
51
|
+
memory_target = @memory
|
|
52
|
+
|
|
53
|
+
generated << Kube::Cluster["HorizontalPodAutoscaler"].new {
|
|
54
|
+
metadata.name = name
|
|
55
|
+
metadata.namespace = namespace if namespace
|
|
56
|
+
metadata.labels = labels.reject { |k, _| k == LABEL }
|
|
57
|
+
|
|
58
|
+
spec.scaleTargetRef = {
|
|
59
|
+
apiVersion: api_version,
|
|
60
|
+
kind: resource_kind,
|
|
61
|
+
name: name,
|
|
62
|
+
}
|
|
63
|
+
spec.minReplicas = min
|
|
64
|
+
spec.maxReplicas = max
|
|
65
|
+
spec.metrics = [
|
|
66
|
+
{
|
|
67
|
+
type: "Resource",
|
|
68
|
+
resource: {
|
|
69
|
+
name: "cpu",
|
|
70
|
+
target: { type: "Utilization", averageUtilization: cpu_target },
|
|
71
|
+
},
|
|
72
|
+
},
|
|
73
|
+
{
|
|
74
|
+
type: "Resource",
|
|
75
|
+
resource: {
|
|
76
|
+
name: "memory",
|
|
77
|
+
target: { type: "Utilization", averageUtilization: memory_target },
|
|
78
|
+
},
|
|
79
|
+
},
|
|
80
|
+
]
|
|
81
|
+
}
|
|
82
|
+
end
|
|
83
|
+
|
|
84
|
+
manifest.resources.concat(generated)
|
|
85
|
+
end
|
|
86
|
+
|
|
87
|
+
private
|
|
88
|
+
|
|
89
|
+
def parse_range(value)
|
|
90
|
+
parts = value.to_s.split("-", 2)
|
|
91
|
+
|
|
92
|
+
unless parts.length == 2
|
|
93
|
+
raise ArgumentError,
|
|
94
|
+
"Invalid autoscale label: #{value.inspect}. Expected format: \"min-max\" (e.g. \"1-5\")"
|
|
95
|
+
end
|
|
96
|
+
|
|
97
|
+
min = Integer(parts[0])
|
|
98
|
+
max = Integer(parts[1])
|
|
99
|
+
|
|
100
|
+
unless min > 0 && max >= min
|
|
101
|
+
raise ArgumentError,
|
|
102
|
+
"Invalid autoscale range: min=#{min}, max=#{max}. " \
|
|
103
|
+
"min must be > 0 and max must be >= min."
|
|
104
|
+
end
|
|
105
|
+
|
|
106
|
+
[min, max]
|
|
107
|
+
end
|
|
108
|
+
end
|
|
109
|
+
end
|
|
110
|
+
end
|
|
111
|
+
end
|
|
@@ -2,40 +2,42 @@
|
|
|
2
2
|
|
|
3
3
|
module Kube
|
|
4
4
|
module Cluster
|
|
5
|
-
class
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
LABEL = :"app.kubernetes.io/expose"
|
|
5
|
+
class Middleware
|
|
6
|
+
# Generates an Ingress for every Service whose source resource
|
|
7
|
+
# carries the +app.kubernetes.io/expose+ label.
|
|
8
|
+
#
|
|
9
|
+
# The label value is the hostname:
|
|
10
|
+
#
|
|
11
|
+
# metadata.labels = { "app.kubernetes.io/expose": "app.example.com" }
|
|
12
|
+
#
|
|
13
|
+
# Set to +"true"+ to use the resource name as a hostname placeholder
|
|
14
|
+
# (useful when a later middleware or the manifest class resolves it).
|
|
15
|
+
#
|
|
16
|
+
# Options:
|
|
17
|
+
# issuer: — cert-manager ClusterIssuer name (default: "letsencrypt-prod")
|
|
18
|
+
# ingress_class: — IngressClassName (default: "nginx")
|
|
19
|
+
#
|
|
20
|
+
# stack do
|
|
21
|
+
# use Middleware::IngressForService
|
|
22
|
+
# use Middleware::IngressForService, issuer: "letsencrypt-staging"
|
|
23
|
+
# end
|
|
24
|
+
#
|
|
25
|
+
class IngressForService < Middleware
|
|
26
|
+
LABEL = :"app.kubernetes.io/expose"
|
|
28
27
|
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
28
|
+
def initialize(issuer: "letsencrypt-prod", ingress_class: "nginx")
|
|
29
|
+
@issuer = issuer
|
|
30
|
+
@ingress_class = ingress_class
|
|
31
|
+
end
|
|
32
|
+
|
|
33
|
+
def call(manifest)
|
|
34
|
+
generated = []
|
|
33
35
|
|
|
34
|
-
|
|
35
|
-
|
|
36
|
+
manifest.resources.each do |resource|
|
|
37
|
+
next unless resource.kind == "Service"
|
|
36
38
|
|
|
37
|
-
host = label(
|
|
38
|
-
|
|
39
|
+
host = resource.label(LABEL)
|
|
40
|
+
next unless host
|
|
39
41
|
|
|
40
42
|
h = resource.to_h
|
|
41
43
|
name = h.dig(:metadata, :name)
|
|
@@ -53,7 +55,7 @@ module Kube
|
|
|
53
55
|
issuer = @issuer
|
|
54
56
|
ingress_class = @ingress_class
|
|
55
57
|
|
|
56
|
-
|
|
58
|
+
generated << Kube::Cluster["Ingress"].new {
|
|
57
59
|
metadata.name = name
|
|
58
60
|
metadata.namespace = namespace if namespace
|
|
59
61
|
metadata.labels = labels.reject { |k, _| k == LABEL }
|
|
@@ -79,9 +81,9 @@ module Kube
|
|
|
79
81
|
},
|
|
80
82
|
]
|
|
81
83
|
}
|
|
82
|
-
|
|
83
|
-
[resource, ingress]
|
|
84
84
|
end
|
|
85
|
+
|
|
86
|
+
manifest.resources.concat(generated)
|
|
85
87
|
end
|
|
86
88
|
end
|
|
87
89
|
end
|