kube_cluster 0.2.0 → 0.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/release.yml +43 -0
- data/.github/workflows/tag-gem-version-bump.yml +47 -0
- data/.gitignore +2 -0
- data/Gemfile.lock +48 -52
- data/bin/console +3 -0
- data/bin/dev +4 -0
- data/docker-compose.yml +26 -0
- data/examples/01-basic-redis-pod/manifest.rb +60 -0
- data/examples/database/manifest.rb +238 -0
- data/examples/version2/demo.rb +87 -0
- data/examples/version2/helpers.rb +18 -0
- data/examples/version2/my_app.rb +45 -0
- data/examples/version2/postgresql.rb +81 -0
- data/examples/version2/ruby_on_rails.rb +31 -0
- data/examples/web-app/manifest.rb +215 -0
- data/flake.lock +3 -3
- data/flake.nix +6 -0
- data/kube_cluster.gemspec +3 -1
- data/lib/kube/cli/cluster.rb +41 -0
- data/lib/kube/cluster/connection.rb +18 -0
- data/lib/kube/cluster/instance.rb +21 -0
- data/lib/kube/cluster/manifest/middleware/annotations.rb +32 -0
- data/lib/kube/cluster/manifest/middleware/hpa_for_deployment.rb +109 -0
- data/lib/kube/cluster/manifest/middleware/ingress_for_service.rb +89 -0
- data/lib/kube/cluster/manifest/middleware/labels.rb +59 -0
- data/lib/kube/cluster/manifest/middleware/namespace.rb +31 -0
- data/lib/kube/cluster/manifest/middleware/pod_anti_affinity.rb +61 -0
- data/lib/kube/cluster/manifest/middleware/resource_preset.rb +64 -0
- data/lib/kube/cluster/manifest/middleware/security_context.rb +84 -0
- data/lib/kube/cluster/manifest/middleware/service_for_deployment.rb +69 -0
- data/lib/kube/cluster/manifest/middleware.rb +178 -0
- data/lib/kube/cluster/manifest/stack.rb +56 -0
- data/lib/kube/cluster/manifest.rb +76 -0
- data/lib/kube/cluster/resource/dirty_tracking.rb +113 -0
- data/lib/kube/cluster/resource/persistence.rb +67 -0
- data/lib/kube/cluster/resource.rb +21 -0
- data/lib/kube/cluster/version.rb +1 -1
- data/lib/kube/cluster.rb +13 -7
- data/lib/kube/errors.rb +57 -0
- metadata +63 -17
- data/Rakefile +0 -11
- data/TREE_PLAN.md +0 -513
- data/bin/generate-command-schema-v1 +0 -44
- data/data/kubectl-command-tree-v1-minimal.json +0 -125
- data/data/kubectl-command-tree-v1.json +0 -1469
- data/examples/quick-repl/docker-compose.yml +0 -52
- data/exe/kube_cluster +0 -6
- data/lib/kube/cluster/command_node.rb +0 -89
- data/lib/kube/cluster/ctl.rb +0 -33
- data/lib/kube/cluster/query_builder.rb +0 -35
- data/lib/kube/cluster/resource_selector.rb +0 -19
- data/lib/kube/cluster/tree_node.rb +0 -51
|
# frozen_string_literal: true

module Kube
  module Cluster
    class Manifest < Kube::Schema::Manifest
      class Middleware
        # Generates an Ingress for every Service whose source resource
        # carries the +app.kubernetes.io/expose+ label.
        #
        # The label value is the hostname:
        #
        #   metadata.labels = { "app.kubernetes.io/expose": "app.example.com" }
        #
        # Set to +"true"+ to use the resource name as a hostname placeholder
        # (useful when a later middleware or the manifest class resolves it).
        #
        # Options:
        #   issuer:        — cert-manager ClusterIssuer name (default: "letsencrypt-prod")
        #   ingress_class: — IngressClassName (default: "nginx")
        #
        #   stack do
        #     use Middleware::IngressForService
        #     use Middleware::IngressForService, issuer: "letsencrypt-staging"
        #   end
        #
        class IngressForService < Middleware
          LABEL = :"app.kubernetes.io/expose"

          def initialize(issuer: "letsencrypt-prod", ingress_class: "nginx")
            @issuer = issuer
            @ingress_class = ingress_class
          end

          # Returns [service, ingress] when the Service is exposed; otherwise
          # passes the resource through unchanged.
          def call(resource)
            return resource unless kind(resource) == "Service"

            host = label(resource, LABEL)
            return resource unless host

            h = resource.to_h
            name = h.dig(:metadata, :name)
            namespace = h.dig(:metadata, :namespace)
            labels = h.dig(:metadata, :labels) || {}

            # Find the first port on the service
            port_name = Array(h.dig(:spec, :ports)).first&.dig(:name) || "http"

            # Use resource name as hostname fallback if label is just "true"
            host = "#{name}.local" if host == "true"

            # Capture ivars as locals — the block runs via instance_exec
            # on a BlackHoleStruct, so @ivars would resolve on the BHS.
            issuer = @issuer
            ingress_class = @ingress_class

            ingress = Kube::Schema["Ingress"].new {
              metadata.name = name
              metadata.namespace = namespace if namespace
              # The #label helper accepts both string and symbol keys, so the
              # expose label must be stripped regardless of key type — compare
              # on the symbolized key rather than requiring an exact symbol
              # match (previously a string-keyed expose label leaked through).
              metadata.labels = labels.reject { |k, _| k.to_sym == LABEL }
              metadata.annotations = {
                "cert-manager.io/cluster-issuer": issuer,
                "nginx.ingress.kubernetes.io/ssl-redirect": "true",
              }

              spec.ingressClassName = ingress_class
              spec.tls = [
                { hosts: [host], secretName: "#{name}-tls" },
              ]
              spec.rules = [
                {
                  host: host,
                  http: {
                    paths: [{
                      path: "/",
                      pathType: "Prefix",
                      backend: { service: { name: name, port: { name: port_name } } },
                    }],
                  },
                },
              ]
            }

            [resource, ingress]
          end
        end
      end
    end
  end
end
|
# frozen_string_literal: true

module Kube
  module Cluster
    class Manifest < Kube::Schema::Manifest
      class Middleware
        # Merges a set of default labels into +metadata.labels+ on every
        # resource. Labels already present on a resource win, so the supplied
        # values act as defaults that can be overridden per-resource.
        #
        #   stack do
        #     use Middleware::Labels, app: "web-app", managed_by: "kube_cluster"
        #   end
        #
        # Well-known shorthand keywords are expanded to the recommended
        # Kubernetes label keys:
        #
        #   app:        -> "app.kubernetes.io/name"
        #   instance:   -> "app.kubernetes.io/instance"
        #   version:    -> "app.kubernetes.io/version"
        #   component:  -> "app.kubernetes.io/component"
        #   part_of:    -> "app.kubernetes.io/part-of"
        #   managed_by: -> "app.kubernetes.io/managed-by"
        #
        # Anything else is passed through as-is (string or symbol).
        #
        class Labels < Middleware
          STANDARD_KEYS = {
            app: :"app.kubernetes.io/name",
            instance: :"app.kubernetes.io/instance",
            version: :"app.kubernetes.io/version",
            component: :"app.kubernetes.io/component",
            part_of: :"app.kubernetes.io/part-of",
            managed_by: :"app.kubernetes.io/managed-by",
          }.freeze

          # @param labels [Hash] label defaults; keys may be the shorthands above
          def initialize(**labels)
            @labels = normalize(labels)
          end

          # Merge the defaults underneath any labels the resource already has.
          def call(resource)
            hash = resource.to_h
            metadata = (hash[:metadata] ||= {})
            metadata[:labels] = @labels.merge(metadata[:labels] || {})
            rebuild(resource, hash)
          end

          private

          # Expand shorthand keys to standard label keys and stringify values.
          def normalize(labels)
            labels.to_h { |key, value| [STANDARD_KEYS.fetch(key, key), value.to_s] }
          end
        end
      end
    end
  end
end
|
# frozen_string_literal: true

module Kube
  module Cluster
    class Manifest < Kube::Schema::Manifest
      class Middleware
        # Forces +metadata.namespace+ onto every namespace-scoped resource.
        # Cluster-scoped kinds (Namespace, ClusterRole, etc.) pass through
        # untouched.
        #
        #   stack do
        #     use Middleware::Namespace, "production"
        #   end
        #
        class Namespace < Middleware
          # @param namespace [String] target namespace for all resources
          def initialize(namespace)
            @namespace = namespace
          end

          # Rebuild the resource with the configured namespace applied.
          def call(resource)
            return resource if cluster_scoped?(resource)

            hash = resource.to_h
            metadata = (hash[:metadata] ||= {})
            metadata[:namespace] = @namespace
            rebuild(resource, hash)
          end
        end
      end
    end
  end
end
|
# frozen_string_literal: true

module Kube
  module Cluster
    class Manifest < Kube::Schema::Manifest
      class Middleware
        # Adds a soft (preferred) pod anti-affinity to pod-bearing resources
        # so replicas prefer to spread across nodes.
        #
        # The label selector is taken from the resource's own
        # +spec.selector.matchLabels+, and the topology key defaults to
        # +kubernetes.io/hostname+.
        #
        # Resources that already define +spec.template.spec.affinity+ are
        # passed through unchanged.
        #
        #   stack do
        #     use Middleware::PodAntiAffinity
        #     use Middleware::PodAntiAffinity, topology_key: "topology.kubernetes.io/zone"
        #   end
        #
        class PodAntiAffinity < Middleware
          # @param topology_key [String] node topology domain to spread across
          # @param weight [Integer] scheduler preference weight
          def initialize(topology_key: "kubernetes.io/hostname", weight: 1)
            @topology_key = topology_key
            @weight = weight
          end

          # Inject the anti-affinity term, or pass the resource through when
          # it is not pod-bearing, has no selector labels, or already sets an
          # affinity of its own.
          def call(resource)
            return resource unless pod_bearing?(resource)

            hash = resource.to_h
            template_spec = pod_template(hash)
            return resource unless template_spec

            # Never clobber an explicitly configured affinity.
            return resource if template_spec[:affinity]

            selector_labels = hash.dig(:spec, :selector, :matchLabels)
            return resource unless selector_labels&.any?

            term = {
              weight: @weight,
              podAffinityTerm: {
                labelSelector: { matchLabels: selector_labels },
                topologyKey: @topology_key,
              },
            }
            template_spec[:affinity] = {
              podAntiAffinity: {
                preferredDuringSchedulingIgnoredDuringExecution: [term],
              },
            }

            rebuild(resource, hash)
          end
        end
      end
    end
  end
end
|
# frozen_string_literal: true

module Kube
  module Cluster
    class Manifest < Kube::Schema::Manifest
      class Middleware
        # Reads the +app.kubernetes.io/size+ label from pod-bearing resources
        # and injects CPU/memory requests and limits into every container.
        #
        # The label on the resource is the input:
        #
        #   Kube::Schema["Deployment"].new {
        #     metadata.labels = { "app.kubernetes.io/size": "small" }
        #     ...
        #   }
        #
        # Register in the stack — no arguments needed:
        #
        #   stack do
        #     use Middleware::ResourcePreset
        #   end
        #
        # Available sizes: nano, micro, small, medium, large, xlarge, 2xlarge.
        # Limits are ~1.5x requests (following Bitnami conventions).
        #
        class ResourcePreset < Middleware
          LABEL = :"app.kubernetes.io/size"

          PRESETS = {
            "nano" => { requests: { cpu: "100m", memory: "128Mi" }, limits: { cpu: "150m", memory: "192Mi" } },
            "micro" => { requests: { cpu: "250m", memory: "256Mi" }, limits: { cpu: "375m", memory: "384Mi" } },
            "small" => { requests: { cpu: "500m", memory: "512Mi" }, limits: { cpu: "750m", memory: "768Mi" } },
            "medium" => { requests: { cpu: "500m", memory: "1024Mi" }, limits: { cpu: "750m", memory: "1536Mi" } },
            "large" => { requests: { cpu: "1", memory: "2048Mi" }, limits: { cpu: "1.5", memory: "3072Mi" } },
            "xlarge" => { requests: { cpu: "1", memory: "3072Mi" }, limits: { cpu: "3", memory: "6144Mi" } },
            "2xlarge" => { requests: { cpu: "1", memory: "3072Mi" }, limits: { cpu: "6", memory: "12288Mi" } },
          }.freeze

          # Merge the preset under any per-container resources already set;
          # explicit container values win. Raises ArgumentError for an
          # unrecognized size label.
          def call(resource)
            size = label(resource, LABEL)
            return resource unless size
            return resource unless pod_bearing?(resource)

            preset = PRESETS.fetch(size.to_s) do
              raise ArgumentError,
                    "Unknown size preset: #{size.inspect}. " \
                    "Valid sizes: #{PRESETS.keys.join(', ')}"
            end

            hash = resource.to_h
            template_spec = pod_template(hash)
            return resource unless template_spec

            each_container(template_spec) do |container|
              container[:resources] = deep_merge(preset, container[:resources] || {})
            end

            rebuild(resource, hash)
          end
        end
      end
    end
  end
end
|
# frozen_string_literal: true

module Kube
  module Cluster
    class Manifest < Kube::Schema::Manifest
      class Middleware
        # Applies pod- and container-level security contexts to pod-bearing
        # resources.
        #
        # The profile is selected by the +app.kubernetes.io/security+ label;
        # when the label is absent, the configured default profile is used.
        #
        #   Kube::Schema["Deployment"].new {
        #     metadata.labels = { "app.kubernetes.io/security": "restricted" }
        #     ...
        #   }
        #
        # Available profiles: +restricted+ (default), +baseline+.
        #
        #   stack do
        #     use Middleware::SecurityContext                      # default: restricted
        #     use Middleware::SecurityContext, default: :baseline  # change default
        #   end
        #
        class SecurityContext < Middleware
          LABEL = :"app.kubernetes.io/security"

          PROFILES = {
            "restricted" => {
              pod: {
                runAsNonRoot: true,
                runAsUser: 1000,
                runAsGroup: 1000,
                fsGroup: 1000,
                seccompProfile: { type: "RuntimeDefault" },
              },
              container: {
                allowPrivilegeEscalation: false,
                readOnlyRootFilesystem: true,
                capabilities: { drop: ["ALL"] },
              },
            },
            "baseline" => {
              pod: {
                runAsNonRoot: true,
                runAsUser: 1000,
                runAsGroup: 1000,
                fsGroup: 1000,
              },
              container: {
                allowPrivilegeEscalation: false,
              },
            },
          }.freeze

          # @param default [Symbol, String] profile used when no label is set
          def initialize(default: :restricted)
            @default = default.to_s
          end

          # Merge the selected profile's contexts into the pod spec and every
          # container; values already present on the resource win. Raises
          # ArgumentError for an unrecognized profile name.
          def call(resource)
            return resource unless pod_bearing?(resource)

            profile_name = label(resource, LABEL) || @default
            profile = PROFILES.fetch(profile_name.to_s) do
              raise ArgumentError,
                    "Unknown security profile: #{profile_name.inspect}. " \
                    "Valid profiles: #{PROFILES.keys.join(', ')}"
            end

            hash = resource.to_h
            template_spec = pod_template(hash)
            return resource unless template_spec

            template_spec[:securityContext] =
              deep_merge(profile[:pod], template_spec[:securityContext] || {})

            each_container(template_spec) do |container|
              container[:securityContext] =
                deep_merge(profile[:container], container[:securityContext] || {})
            end

            rebuild(resource, hash)
          end
        end
      end
    end
  end
end
|
# frozen_string_literal: true

module Kube
  module Cluster
    class Manifest < Kube::Schema::Manifest
      class Middleware
        # Generates a Service for every pod-bearing resource that has
        # containers with named ports.
        #
        # The generated Service uses +spec.selector.matchLabels+ from
        # the source resource and maps each named container port.
        #
        # Labels and namespace are copied from the source resource, so
        # subsequent middleware (Labels, Namespace, etc.) will also
        # apply to the generated Service.
        #
        #   stack do
        #     use Middleware::ServiceForDeployment
        #   end
        #
        class ServiceForDeployment < Middleware
          # Returns [resource, service] when a Service can be derived;
          # otherwise passes the resource through unchanged (no named
          # ports or no selector labels).
          def call(resource)
            return resource unless pod_bearing?(resource)

            h = resource.to_h
            ports = extract_ports(h)
            return resource if ports.empty?

            match_labels = h.dig(:spec, :selector, :matchLabels)
            return resource unless match_labels && !match_labels.empty?

            service = Kube::Schema["Service"].new {
              metadata.name = h.dig(:metadata, :name)
              metadata.namespace = h.dig(:metadata, :namespace) if h.dig(:metadata, :namespace)
              metadata.labels = h.dig(:metadata, :labels) || {}

              spec.selector = match_labels
              spec.ports = ports.map { |p|
                {
                  name: p[:name],
                  port: p[:containerPort],
                  targetPort: p[:name],
                  protocol: p.fetch(:protocol, "TCP"),
                }
              }
            }

            [resource, service]
          end

          private

          # Collect every named port from the pod template's container lists
          # (containers and initContainers).
          #
          # Deduplicated by port name: a Service may not declare two ports
          # with the same name, while distinct containers can legitimately
          # reuse one — the first occurrence wins.
          def extract_ports(hash)
            pod_spec = pod_template(hash)
            return [] unless pod_spec

            ports = []
            each_container(pod_spec) do |container|
              Array(container[:ports]).each do |port|
                ports << port if port[:name]
              end
            end
            ports.uniq { |port| port[:name] }
          end
        end
      end
    end
  end
end
@@ -0,0 +1,178 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
# Patch BlackHoleStruct to handle arrays consistently.
|
|
4
|
+
#
|
|
5
|
+
# The upstream gem does not recurse into arrays — hashes inside arrays
|
|
6
|
+
# are not converted to BlackHoleStruct on construction, and are not
|
|
7
|
+
# converted back to plain Hash on #to_h. This causes key-type
|
|
8
|
+
# inconsistencies after a Resource round-trip (symbol keys become
|
|
9
|
+
# string keys inside arrays).
|
|
10
|
+
#
|
|
11
|
+
# These two patches fix both directions:
|
|
12
|
+
# initialize — converts hashes inside arrays to BlackHoleStruct
|
|
13
|
+
# to_h — converts BlackHoleStruct/arrays back to plain objects
|
|
14
|
+
class BlackHoleStruct
|
|
15
|
+
def initialize(hash = {})
|
|
16
|
+
raise ArgumentError, "Argument should be a Hash" unless hash.is_a?(Hash)
|
|
17
|
+
|
|
18
|
+
@table = {}
|
|
19
|
+
hash.each do |key, value|
|
|
20
|
+
@table[key.to_sym] = deep_wrap(value)
|
|
21
|
+
end
|
|
22
|
+
end
|
|
23
|
+
|
|
24
|
+
def to_h
|
|
25
|
+
hash = {}
|
|
26
|
+
@table.each do |key, value|
|
|
27
|
+
hash[key] = deep_unwrap(value)
|
|
28
|
+
end
|
|
29
|
+
hash
|
|
30
|
+
end
|
|
31
|
+
|
|
32
|
+
private
|
|
33
|
+
|
|
34
|
+
def deep_wrap(value)
|
|
35
|
+
case value
|
|
36
|
+
when Hash then self.class.new(value)
|
|
37
|
+
when Array then value.map { |v| deep_wrap(v) }
|
|
38
|
+
else value
|
|
39
|
+
end
|
|
40
|
+
end
|
|
41
|
+
|
|
42
|
+
def deep_unwrap(value)
|
|
43
|
+
case value
|
|
44
|
+
when self.class then value.to_h
|
|
45
|
+
when Array then value.map { |v| deep_unwrap(v) }
|
|
46
|
+
else value
|
|
47
|
+
end
|
|
48
|
+
end
|
|
49
|
+
end
|
|
50
|
+
|
|
51
|
+
module Kube
  module Cluster
    class Manifest < Kube::Schema::Manifest
      # Base class for manifest middleware.
      #
      # A middleware receives one resource per call and returns either:
      #   - a single resource (transform), or
      #   - an array of resources (generative — e.g. Deployment in,
      #     [Deployment, Service] out).
      #
      # The stack re-processes the whole manifest at every stage, so
      # resources produced by one middleware flow through all later ones.
      #
      # Transform example:
      #
      #   class AddTeamLabel < Middleware
      #     def call(resource)
      #       h = resource.to_h
      #       h[:metadata][:labels][:"app.kubernetes.io/team"] = "platform"
      #       rebuild(resource, h)
      #     end
      #   end
      #
      # Generative example:
      #
      #   class ServiceForDeployment < Middleware
      #     def call(resource)
      #       return resource unless pod_bearing?(resource)
      #       service = build_service_from(resource)
      #       [resource, service]
      #     end
      #   end
      #
      class Middleware
        POD_BEARING_KINDS = %w[Deployment StatefulSet DaemonSet Job CronJob ReplicaSet].freeze

        CLUSTER_SCOPED_KINDS = %w[
          Namespace ClusterRole ClusterRoleBinding
          PersistentVolume StorageClass IngressClass
          CustomResourceDefinition PriorityClass
          RuntimeClass VolumeAttachment
          CSIDriver CSINode
        ].freeze

        def initialize(**opts)
          @opts = opts
        end

        # Override in subclasses. Receives a single Resource; returns a
        # single Resource (transform) or an array of Resources (generative).
        # The base implementation is the identity transform.
        def call(resource)
          resource
        end

        private

        # Build a new resource of the same schema subclass from a hash.
        def rebuild(resource, hash)
          resource.class.new(hash)
        end

        # Read a label value from the resource (symbol or string key).
        def label(resource, key)
          lookup(resource.to_h.dig(:metadata, :labels), key)
        end

        # Read an annotation value from the resource (symbol or string key).
        def annotation(resource, key)
          lookup(resource.to_h.dig(:metadata, :annotations), key)
        end

        # The resource kind as a String (e.g. "Deployment").
        def kind(resource)
          kind_from_hash(resource.to_h)
        end

        # Is this a resource that contains a pod template?
        def pod_bearing?(resource)
          POD_BEARING_KINDS.include?(kind(resource))
        end

        # Is this a cluster-scoped resource (no namespace)?
        def cluster_scoped?(resource)
          CLUSTER_SCOPED_KINDS.include?(kind(resource))
        end

        # Returns the pod template spec from a resource hash, accounting
        # for CronJob's extra +jobTemplate+ nesting.
        def pod_template(hash)
          if kind_from_hash(hash) == "CronJob"
            hash.dig(:spec, :jobTemplate, :spec, :template, :spec)
          else
            hash.dig(:spec, :template, :spec)
          end
        end

        # Walk every container list in a pod spec (containers,
        # initContainers) and yield each container hash.
        def each_container(pod_spec, &block)
          return unless pod_spec

          %i[containers initContainers].each do |key|
            Array(pod_spec[key]).each(&block)
          end
        end

        # Extract kind from a hash (symbol or string keys).
        def kind_from_hash(hash)
          (hash[:kind] || hash["kind"]).to_s
        end

        # Fetch +key+ from a metadata map that may use symbol or string keys.
        def lookup(map, key)
          map ||= {}
          map[key.to_sym] || map[key.to_s]
        end

        # Deep-merge two hashes: right wins on conflict, nested hashes merge.
        def deep_merge(base, overlay)
          base.merge(overlay) do |_key, base_val, overlay_val|
            if base_val.is_a?(Hash) && overlay_val.is_a?(Hash)
              deep_merge(base_val, overlay_val)
            else
              overlay_val
            end
          end
        end
      end
    end
  end
end
|
|
# frozen_string_literal: true

module Kube
  module Cluster
    class Manifest < Kube::Schema::Manifest
      # An ordered middleware pipeline that runs the whole manifest through
      # one stage at a time. Each stage flat-maps every resource through a
      # single middleware, so a generative middleware can emit additional
      # resources that all later stages will see and process.
      #
      #   stack = Kube::Cluster::Manifest::Stack.new do
      #     use Middleware::ServiceForDeployment  # generates Services
      #     use Middleware::Labels, app: "web"    # labels everything, including generated Services
      #     use Middleware::ResourcePreset        # sizes everything
      #   end
      #
      #   processed = stack.call(resources)
      #
      class Stack
        def initialize(&block)
          @middleware = []
          instance_eval(&block) if block
        end

        # Register a middleware class together with the arguments that will
        # be forwarded to +klass.new+ when the stack runs.
        #
        # @param klass [Class] a Middleware subclass
        # @param args [Array] positional arguments forwarded to klass.new
        # @param kwargs [Hash] keyword arguments forwarded to klass.new
        def use(klass, *args, **kwargs)
          @middleware << [klass, args, kwargs]
        end

        # Push the resource array through every registered stage in order.
        # At each stage every resource is passed individually through the
        # middleware, which may return one resource (transform) or several
        # (generative); either way the results feed the next stage.
        #
        # @param resources [Array<Kube::Schema::Resource>]
        # @return [Array<Kube::Schema::Resource>]
        def call(resources)
          @middleware.reduce(resources) do |current, (klass, args, kwargs)|
            stage = klass.new(*args, **kwargs)
            current.flat_map { |resource| Array(stage.call(resource)) }
          end
        end

        # True when no middleware has been registered.
        def empty?
          @middleware.empty?
        end
      end
    end
  end
end