kube_cluster 0.2.0 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59):
  1. checksums.yaml +4 -4
  2. data/.github/workflows/release.yml +43 -0
  3. data/.github/workflows/tag-gem-version-bump.yml +47 -0
  4. data/.gitignore +2 -0
  5. data/Gemfile.lock +48 -52
  6. data/bin/console +3 -0
  7. data/bin/dev +4 -0
  8. data/docker-compose.yml +26 -0
  9. data/examples/01-basic-redis-pod/manifest.rb +60 -0
  10. data/examples/02-manifest-with-middleware/manifest.rb +37 -0
  11. data/examples/02-manifest-with-middleware/middleware/labels.rb +4 -0
  12. data/examples/02-manifest-with-middleware/middleware/namespace.rb +4 -0
  13. data/examples/02-manifest-with-middleware/templates/config_map.rb +13 -0
  14. data/examples/02-manifest-with-middleware/templates/deployment.rb +59 -0
  15. data/examples/02-manifest-with-middleware/templates/horizontal_pod_autoscaler.rb +30 -0
  16. data/examples/02-manifest-with-middleware/templates/ingress.rb +38 -0
  17. data/examples/02-manifest-with-middleware/templates/service.rb +12 -0
  18. data/examples/03-app-with-database/demo.rb +87 -0
  19. data/examples/03-app-with-database/helpers.rb +18 -0
  20. data/examples/03-app-with-database/my_app.rb +45 -0
  21. data/examples/03-app-with-database/postgresql.rb +81 -0
  22. data/examples/03-app-with-database/ruby_on_rails.rb +31 -0
  23. data/flake.lock +3 -3
  24. data/flake.nix +6 -0
  25. data/kube_cluster.gemspec +3 -1
  26. data/lib/kube/cli/cluster.rb +41 -0
  27. data/lib/kube/cluster/connection.rb +18 -0
  28. data/lib/kube/cluster/instance.rb +21 -0
  29. data/lib/kube/cluster/manifest.rb +25 -0
  30. data/lib/kube/cluster/middleware/annotations.rb +32 -0
  31. data/lib/kube/cluster/middleware/hpa_for_deployment.rb +111 -0
  32. data/lib/kube/cluster/middleware/ingress_for_service.rb +91 -0
  33. data/lib/kube/cluster/middleware/labels.rb +59 -0
  34. data/lib/kube/cluster/middleware/namespace.rb +31 -0
  35. data/lib/kube/cluster/middleware/pod_anti_affinity.rb +61 -0
  36. data/lib/kube/cluster/middleware/resource_preset.rb +64 -0
  37. data/lib/kube/cluster/middleware/security_context.rb +84 -0
  38. data/lib/kube/cluster/middleware/service_for_deployment.rb +71 -0
  39. data/lib/kube/cluster/middleware/stack.rb +43 -0
  40. data/lib/kube/cluster/middleware.rb +69 -0
  41. data/lib/kube/cluster/resource/dirty_tracking.rb +113 -0
  42. data/lib/kube/cluster/resource/persistence.rb +67 -0
  43. data/lib/kube/cluster/resource.rb +99 -0
  44. data/lib/kube/cluster/version.rb +1 -1
  45. data/lib/kube/cluster.rb +34 -7
  46. data/lib/kube/errors.rb +57 -0
  47. metadata +69 -17
  48. data/Rakefile +0 -11
  49. data/TREE_PLAN.md +0 -513
  50. data/bin/generate-command-schema-v1 +0 -44
  51. data/data/kubectl-command-tree-v1-minimal.json +0 -125
  52. data/data/kubectl-command-tree-v1.json +0 -1469
  53. data/examples/quick-repl/docker-compose.yml +0 -52
  54. data/exe/kube_cluster +0 -6
  55. data/lib/kube/cluster/command_node.rb +0 -89
  56. data/lib/kube/cluster/ctl.rb +0 -33
  57. data/lib/kube/cluster/query_builder.rb +0 -35
  58. data/lib/kube/cluster/resource_selector.rb +0 -19
  59. data/lib/kube/cluster/tree_node.rb +0 -51
@@ -0,0 +1,61 @@
1
# frozen_string_literal: true

module Kube
  module Cluster
    class Middleware
      # Adds a soft (preferred) pod anti-affinity to every pod-bearing
      # resource so the scheduler tries to spread replicas apart.
      #
      # The label selector comes from the resource's own
      # +spec.selector.matchLabels+; the topology key defaults to
      # +kubernetes.io/hostname+ ("prefer different nodes").
      #
      # Resources that already define +spec.template.spec.affinity+
      # pass through untouched, as do resources without matchLabels.
      #
      #   stack do
      #     use Middleware::PodAntiAffinity
      #     use Middleware::PodAntiAffinity, topology_key: "topology.kubernetes.io/zone"
      #   end
      #
      class PodAntiAffinity < Middleware
        # @param topology_key [String] topology domain to spread across
        # @param weight [Integer] scheduler preference weight
        def initialize(topology_key: "kubernetes.io/hostname", weight: 1)
          @topology_key = topology_key
          @weight = weight
        end

        # Mutates the manifest in place, swapping each eligible resource
        # for a rebuilt copy that carries the anti-affinity stanza.
        def call(manifest)
          manifest.resources.map! do |resource|
            with_anti_affinity(resource) || resource
          end
        end

        private

        # Returns a rebuilt resource with affinity injected, or nil when
        # the resource should pass through unchanged.
        def with_anti_affinity(resource)
          return nil unless resource.pod_bearing?

          data = resource.to_h
          template = resource.pod_template(data)
          return nil unless template
          # Never clobber affinity the author configured explicitly.
          return nil if template[:affinity]

          selector = data.dig(:spec, :selector, :matchLabels)
          return nil if selector.nil? || selector.empty?

          template[:affinity] = affinity_stanza(selector)
          resource.rebuild(data)
        end

        # The podAntiAffinity structure built from the resource's own
        # match labels and the configured topology key / weight.
        def affinity_stanza(selector)
          {
            podAntiAffinity: {
              preferredDuringSchedulingIgnoredDuringExecution: [
                {
                  weight: @weight,
                  podAffinityTerm: {
                    labelSelector: { matchLabels: selector },
                    topologyKey: @topology_key,
                  },
                },
              ],
            },
          }
        end
      end
    end
  end
end
@@ -0,0 +1,64 @@
1
# frozen_string_literal: true

module Kube
  module Cluster
    class Middleware
      # Reads the +app.kubernetes.io/size+ label from pod-bearing
      # resources and injects CPU/memory requests and limits into
      # every container.
      #
      # The label on the resource is the input:
      #
      #   Kube::Cluster["Deployment"].new {
      #     metadata.labels = { "app.kubernetes.io/size": "small" }
      #     ...
      #   }
      #
      # Register in the stack — no arguments needed:
      #
      #   stack do
      #     use Middleware::ResourcePreset
      #   end
      #
      # Available sizes: nano, micro, small, medium, large, xlarge, 2xlarge.
      # Limits are ~1.5x requests (following Bitnami conventions).
      #
      class ResourcePreset < Middleware
        LABEL = :"app.kubernetes.io/size"

        PRESETS = {
          "nano" => { requests: { cpu: "100m", memory: "128Mi" }, limits: { cpu: "150m", memory: "192Mi" } },
          "micro" => { requests: { cpu: "250m", memory: "256Mi" }, limits: { cpu: "375m", memory: "384Mi" } },
          "small" => { requests: { cpu: "500m", memory: "512Mi" }, limits: { cpu: "750m", memory: "768Mi" } },
          "medium" => { requests: { cpu: "500m", memory: "1024Mi" }, limits: { cpu: "750m", memory: "1536Mi" } },
          "large" => { requests: { cpu: "1", memory: "2048Mi" }, limits: { cpu: "1.5", memory: "3072Mi" } },
          "xlarge" => { requests: { cpu: "1", memory: "3072Mi" }, limits: { cpu: "3", memory: "6144Mi" } },
          "2xlarge" => { requests: { cpu: "1", memory: "3072Mi" }, limits: { cpu: "6", memory: "12288Mi" } },
        }.freeze

        # Mutates the manifest in place, swapping each labelled
        # pod-bearing resource for a copy with resources filled in.
        def call(manifest)
          manifest.resources.map! { |resource| apply_preset(resource) }
        end

        private

        # Returns the resource unchanged when it is unlabelled, not
        # pod-bearing, or has no pod template; otherwise a rebuilt copy.
        def apply_preset(resource)
          size = resource.label(LABEL)
          return resource unless size
          return resource unless resource.pod_bearing?

          preset = lookup_preset(size)
          data = resource.to_h
          template = resource.pod_template(data)
          return resource unless template

          resource.each_container(template) do |container|
            existing = container[:resources] || {}
            # Explicit per-container values win over the preset.
            container[:resources] = deep_merge(preset, existing)
          end

          resource.rebuild(data)
        end

        # @raise [ArgumentError] for a size outside PRESETS
        def lookup_preset(size)
          PRESETS.fetch(size.to_s) do
            raise ArgumentError, "Unknown size preset: #{size.inspect}. " \
              "Valid sizes: #{PRESETS.keys.join(', ')}"
          end
        end
      end
    end
  end
end
@@ -0,0 +1,84 @@
1
# frozen_string_literal: true

module Kube
  module Cluster
    class Middleware
      # Injects pod and container security contexts on pod-bearing resources.
      #
      # Reads the +app.kubernetes.io/security+ label. When the label
      # is absent, the middleware applies the default profile.
      #
      #   Kube::Cluster["Deployment"].new {
      #     metadata.labels = { "app.kubernetes.io/security": "restricted" }
      #     ...
      #   }
      #
      # Available profiles: +restricted+ (default), +baseline+.
      #
      #   stack do
      #     use Middleware::SecurityContext                     # default: restricted
      #     use Middleware::SecurityContext, default: :baseline # change default
      #   end
      #
      class SecurityContext < Middleware
        LABEL = :"app.kubernetes.io/security"

        PROFILES = {
          "restricted" => {
            pod: {
              runAsNonRoot: true,
              runAsUser: 1000,
              runAsGroup: 1000,
              fsGroup: 1000,
              seccompProfile: { type: "RuntimeDefault" },
            },
            container: {
              allowPrivilegeEscalation: false,
              readOnlyRootFilesystem: true,
              capabilities: { drop: ["ALL"] },
            },
          },
          "baseline" => {
            pod: {
              runAsNonRoot: true,
              runAsUser: 1000,
              runAsGroup: 1000,
              fsGroup: 1000,
            },
            container: {
              allowPrivilegeEscalation: false,
            },
          },
        }.freeze

        # @param default [Symbol, String] profile used when the label is absent
        def initialize(default: :restricted)
          @default = default.to_s
        end

        # Mutates the manifest in place, swapping each pod-bearing
        # resource for a copy carrying the profile's security contexts.
        def call(manifest)
          manifest.resources.map! do |resource|
            resource.pod_bearing? ? secure(resource) : resource
          end
        end

        private

        # Applies the resolved profile to the pod spec and every
        # container; explicit values on the resource win.
        def secure(resource)
          profile = profile_for(resource)
          data = resource.to_h
          template = resource.pod_template(data)
          return resource unless template

          template[:securityContext] = deep_merge(profile[:pod], template[:securityContext] || {})

          resource.each_container(template) do |container|
            container[:securityContext] = deep_merge(profile[:container], container[:securityContext] || {})
          end

          resource.rebuild(data)
        end

        # @raise [ArgumentError] for a label value outside PROFILES
        def profile_for(resource)
          chosen = resource.label(LABEL) || @default
          PROFILES.fetch(chosen.to_s) do
            raise ArgumentError, "Unknown security profile: #{chosen.inspect}. " \
              "Valid profiles: #{PROFILES.keys.join(', ')}"
          end
        end
      end
    end
  end
end
@@ -0,0 +1,71 @@
1
# frozen_string_literal: true

module Kube
  module Cluster
    class Middleware
      # Generates a Service for every pod-bearing resource that has
      # containers with named ports.
      #
      # The generated Service uses +spec.selector.matchLabels+ from
      # the source resource and maps each named container port.
      #
      # Labels and namespace are copied from the source resource, so
      # subsequent middleware (Labels, Namespace, etc.) will also
      # apply to the generated Service.
      #
      #   stack do
      #     use Middleware::ServiceForDeployment
      #   end
      #
      class ServiceForDeployment < Middleware
        # Appends one Service per eligible resource to the manifest.
        # Resources without a pod template, named ports, or matchLabels
        # are skipped.
        def call(manifest)
          generated = []

          manifest.resources.each do |resource|
            next unless resource.pod_bearing?

            h = resource.to_h
            ports = extract_ports(resource, h)
            next if ports.empty?

            match_labels = h.dig(:spec, :selector, :matchLabels)
            next unless match_labels && !match_labels.empty?

            generated << Kube::Cluster["Service"].new {
              metadata.name = h.dig(:metadata, :name)
              metadata.namespace = h.dig(:metadata, :namespace) if h.dig(:metadata, :namespace)
              metadata.labels = h.dig(:metadata, :labels) || {}

              spec.selector = match_labels
              spec.ports = ports.map { |p|
                {
                  name: p[:name],
                  port: p[:containerPort],
                  # Target by port name so container port renumbering
                  # doesn't break the Service.
                  targetPort: p[:name],
                  protocol: p.fetch(:protocol, "TCP"),
                }
              }
            }
          end

          manifest.resources.concat(generated)
        end

        private

        # Collects named ports from every container of the resource's
        # pod template. Kubernetes requires Service port names to be
        # unique, so duplicates (e.g. the same port name declared by
        # two containers) are collapsed, keeping the first occurrence.
        #
        # NOTE(review): each_container also walks initContainers —
        # confirm init-container ports should be exposed via a Service.
        def extract_ports(resource, hash)
          pod_spec = resource.pod_template(hash)
          return [] unless pod_spec

          ports = []
          resource.each_container(pod_spec) do |container|
            Array(container[:ports]).each do |port|
              ports << port if port[:name]
            end
          end
          ports.uniq { |p| p[:name] }
        end
      end
    end
  end
end
@@ -0,0 +1,43 @@
1
# frozen_string_literal: true

module Kube
  module Cluster
    class Middleware
      # An ordered pipeline of middleware that processes a manifest.
      # Each middleware receives the manifest and mutates it in place.
      #
      #   stack = Kube::Cluster::Middleware::Stack.new do
      #     use Middleware::ServiceForDeployment
      #     use Middleware::Labels, app: "web"
      #     use Middleware::Namespace, "production"
      #   end
      #
      #   stack.call(manifest)
      #
      class Stack
        # The configuration block is instance_eval'd so bare +use+
        # calls register middleware.
        def initialize(&config)
          @entries = []
          instance_eval(&config) if config
        end

        # Register a middleware class with optional positional and
        # keyword arguments; instantiation is deferred until #call.
        def use(klass, *args, **kwargs)
          @entries.push([klass, args, kwargs])
        end

        # Run the manifest through every middleware in registration
        # order. Each middleware mutates the manifest in place.
        def call(manifest)
          @entries.each do |(klass, positional, keywords)|
            instance = klass.new(*positional, **keywords)
            instance.call(manifest)
          end
        end

        # True when no middleware has been registered.
        def empty?
          @entries.none?
        end
      end
    end
  end
end
@@ -0,0 +1,69 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative "middleware/stack"
4
+ require_relative "middleware/namespace"
5
+ require_relative "middleware/labels"
6
+ require_relative "middleware/annotations"
7
+ require_relative "middleware/resource_preset"
8
+ require_relative "middleware/security_context"
9
+ require_relative "middleware/pod_anti_affinity"
10
+ require_relative "middleware/service_for_deployment"
11
+ require_relative "middleware/ingress_for_service"
12
+ require_relative "middleware/hpa_for_deployment"
13
+
14
module Kube
  module Cluster
    # Base class for manifest middleware.
    #
    # A middleware receives the full manifest and mutates it in place;
    # each middleware iterates the resources however it needs to.
    #
    # Transform example:
    #
    #   class AddTeamLabel < Middleware
    #     def call(manifest)
    #       manifest.resources.map! do |resource|
    #         h = resource.to_h
    #         h[:metadata][:labels][:"app.kubernetes.io/team"] = "platform"
    #         resource.rebuild(h)
    #       end
    #     end
    #   end
    #
    # Generative example:
    #
    #   class ServiceForDeployment < Middleware
    #     def call(manifest)
    #       generated = []
    #       manifest.resources.each do |resource|
    #         next unless resource.pod_bearing?
    #         generated << build_service_from(resource)
    #       end
    #       manifest.resources.concat(generated)
    #     end
    #   end
    #
    class Middleware
      # Keyword options are captured for subclasses that want settings
      # without defining their own initializer.
      def initialize(**opts)
        @opts = opts
      end

      # Override in subclasses. Receives the full manifest and mutates
      # it in place; the base implementation is a no-op.
      def call(manifest)
      end

      private

      # Recursively merge +overlay+ into +base+. Overlay values win,
      # except where both sides are hashes, which merge one level deeper.
      def deep_merge(base, overlay)
        base.merge(overlay) do |_key, ours, theirs|
          nested = ours.is_a?(Hash) && theirs.is_a?(Hash)
          nested ? deep_merge(ours, theirs) : theirs
        end
      end
    end
  end
end
@@ -0,0 +1,113 @@
1
# frozen_string_literal: true

module Kube
  module Cluster
    class Resource < Kube::Schema::Resource
      # Change tracking against a "clean" snapshot of the resource.
      #
      # The including class captures a snapshot via the private
      # +snapshot!+ (called from Resource#initialize and after
      # persistence operations); every query here compares the current
      # +to_h+ against that snapshot (+@clean+).
      module DirtyTracking
        # True when the current state differs from the snapshot.
        def changed?
          to_h != @clean
        end

        # Top-level keys whose values differ from the snapshot.
        # @return [Array]
        def changed
          diff_keys(to_h, @clean)
        end

        # Map of top-level key => [old_value, new_value] for every
        # changed key.
        def changes
          build_changes(to_h, @clean)
        end

        # Accept the current state as the new clean baseline.
        def changes_applied
          snapshot!
        end

        # Data suitable for a strategic-merge patch: only the
        # keys/sub-trees that differ from the clean snapshot.
        def patch_data
          deep_diff(to_h, @clean)
        end

        # Supports dynamic +<attr>_changed?+ predicates,
        # e.g. +metadata_changed?+.
        def respond_to_missing?(name, include_private = false)
          name.end_with?("_changed?") || super
        end

        def method_missing(name, *args, &block)
          if name.end_with?("_changed?")
            attr = name.to_s.delete_suffix("_changed?").to_sym
            @clean[attr] != to_h[attr]
          else
            super
          end
        end

        private

        # Capture a deep copy of the current state as the clean baseline.
        def snapshot!
          @clean = deep_dup(to_h)
        end

        # Recursive diff for patching. Nested hashes recurse and are
        # omitted entirely when unchanged; changed leaves map to their
        # *current* value, as a merge patch requires.
        #
        # Fix: previously changed leaves became [old, new] pairs, which
        # is the #changes format — sending that to `kubectl patch`
        # would set fields to two-element arrays instead of the new value.
        def deep_diff(current, original)
          (current.keys | original.keys).each_with_object({}) do |key, result|
            cur_val = current[key]
            orig_val = original[key]

            if cur_val.is_a?(Hash) && orig_val.is_a?(Hash)
              nested = deep_diff(cur_val, orig_val)
              result[key] = nested unless nested.empty?
            elsif cur_val != orig_val
              result[key] = cur_val
            end
          end
        end

        # Top-level keys that differ. The key union is already unique,
        # so no Set is needed.
        #
        # Fix: the previous implementation used Set without
        # `require "set"`, which raises NameError on Ruby < 3.2
        # (Set is only autoloaded from 3.2 onwards).
        def diff_keys(current, original)
          (current.keys | original.keys).select { |key| current[key] != original[key] }
        end

        # [old, new] pairs for every changed top-level key.
        def build_changes(current, original)
          (current.keys | original.keys).each_with_object({}) do |key, hash|
            hash[key] = [original[key], current[key]] if current[key] != original[key]
          end
        end

        # Deep copy of hashes/arrays; other values are shared as-is.
        def deep_dup(obj)
          case obj
          when Hash then obj.each_with_object({}) { |(k, v), h| h[k] = deep_dup(v) }
          when Array then obj.map { |v| deep_dup(v) }
          else obj
          end
        end
      end
    end
  end
end
@@ -0,0 +1,67 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "json"
4
+ require "open3"
5
+
6
module Kube
  module Cluster
    class Resource < Kube::Schema::Resource
      # Server-side CRUD operations, delegated to kubectl through the
      # owning cluster's connection.
      #
      # All writes re-fetch the resource afterwards (#reload) so local
      # state and the dirty-tracking snapshot match the server.
      module Persistence
        # Create or update the resource via `kubectl apply`.
        # @return [true]
        def apply
          payload = JSON.generate(deep_stringify_keys(to_h))
          kubectl("apply", "-f", "-", stdin: payload)
          reload
          true
        end

        # Send only the changed fields as a patch.
        #
        # @param type [String] kubectl patch type ("strategic", "merge", ...)
        # @return [Boolean] false when there is nothing to patch
        # @raise [Kube::CommandError] when the resource has no name yet
        def patch(type: "strategic")
          raise Kube::CommandError, "cannot patch a resource without a name" unless persisted?

          diff = patch_data
          return false if diff.empty?

          payload = JSON.generate(deep_stringify_keys(diff))
          kubectl("patch", resource_type, name, *ns_flags, "--type", type, "-p", payload)
          reload
          true
        end

        # Delete the resource from the cluster.
        # @return [true]
        # @raise [Kube::CommandError] when the resource has no name yet
        def delete
          raise Kube::CommandError, "cannot delete a resource without a name" unless persisted?

          kubectl("delete", resource_type, name, *ns_flags)
          true
        end

        # Re-fetch the resource from the server and reset the clean
        # snapshot. Returns self.
        # @raise [Kube::CommandError] when the resource has no name yet
        def reload
          raise Kube::CommandError, "cannot reload a resource without a name" unless persisted?

          json = kubectl("get", resource_type, name, *ns_flags, "-o", "json")
          @data = BlackHoleStruct.new(JSON.parse(json))
          snapshot!
          self
        end

        private

        # Run a kubectl subcommand through the cluster connection.
        #
        # NOTE(review): arguments are joined with spaces into a single
        # command string, so values containing whitespace (e.g. the JSON
        # "-p" payload, or the trailing `stdin:` options hash in #apply,
        # which lands in *args as a Hash and is stringified) depend
        # entirely on ctl.run's parsing — confirm that interface handles
        # them as intended.
        def kubectl(*args)
          @cluster.connection.ctl.run(args.join(" "))
        end
      end
    end
  end
end
+ end
@@ -0,0 +1,99 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative "resource/dirty_tracking"
4
+ require_relative "resource/persistence"
5
+
6
module Kube
  module Cluster
    # A single Kubernetes resource with dirty tracking and
    # kubectl-backed persistence layered on top of the schema
    # representation.
    class Resource < Kube::Schema::Resource
      include DirtyTracking
      include Persistence

      # Back-reference to the owning cluster; Persistence#kubectl uses
      # it to reach the kubectl connection.
      attr_accessor :cluster

      # Kinds whose spec embeds a pod template.
      POD_BEARING_KINDS = %w[
        Deployment
        StatefulSet
        DaemonSet
        Job
        CronJob
        ReplicaSet
      ].freeze

      # Kinds that live outside any namespace.
      CLUSTER_SCOPED_KINDS = %w[
        Namespace
        ClusterRole
        ClusterRoleBinding
        PersistentVolume
        StorageClass
        IngressClass
        CustomResourceDefinition
        PriorityClass
        RuntimeClass
        VolumeAttachment
        CSIDriver
        CSINode
      ].freeze

      # @param hash [Hash] resource data; an optional :cluster entry is
      #   extracted and stored as the owning cluster before the rest is
      #   handed to the schema layer.
      def initialize(hash = {}, &block)
        # Work on a copy so the :cluster extraction below does not
        # mutate the caller's hash (the previous in-place delete did).
        hash = hash.dup
        @cluster = hash.delete(:cluster)
        super
        snapshot!
      end

      # Build a new resource of the same schema subclass from a hash.
      #
      # Fix: preserve the cluster association — previously rebuilt
      # resources (e.g. every middleware transform) lost @cluster and
      # could no longer reach the connection used by Persistence.
      def rebuild(hash)
        rebuilt = self.class.new(hash)
        rebuilt.cluster = @cluster
        rebuilt
      end

      # Read a label value from the resource (symbol or string key).
      def label(key)
        metadata_value(:labels, key)
      end

      # Read an annotation value from the resource (symbol or string key).
      def annotation(key)
        metadata_value(:annotations, key)
      end

      # The resource kind as a String (e.g. "Deployment").
      def kind
        h = to_h
        (h[:kind] || h["kind"]).to_s
      end

      # Is this a resource that contains a pod template?
      def pod_bearing?
        POD_BEARING_KINDS.include?(kind)
      end

      # Is this a cluster-scoped resource (no namespace)?
      def cluster_scoped?
        CLUSTER_SCOPED_KINDS.include?(kind)
      end

      # Returns the pod template spec from a resource hash, accounting
      # for CronJob's extra +jobTemplate+ nesting.
      def pod_template(hash)
        if (hash[:kind] || hash["kind"]).to_s == "CronJob"
          hash.dig(:spec, :jobTemplate, :spec, :template, :spec)
        else
          hash.dig(:spec, :template, :spec)
        end
      end

      # Walk every container list in a pod spec (containers,
      # initContainers) and yield each container hash.
      def each_container(pod_spec, &block)
        return unless pod_spec

        [:containers, :initContainers].each do |key|
          Array(pod_spec[key]).each(&block)
        end
      end

      private

      # Shared lookup for labels/annotations, tolerant of symbol or
      # string keys on both sides.
      def metadata_value(section, key)
        values = to_h.dig(:metadata, section) || {}
        values[key.to_sym] || values[key.to_s]
      end
    end
  end
end
@@ -2,6 +2,6 @@
module Kube
  module Cluster
    # Gem version (bumped from 0.2.0 to 0.3.0 in this release).
    VERSION = "0.3.0"
  end
end