kube_cluster 0.2.1 → 0.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. checksums.yaml +4 -4
  2. data/Gemfile.lock +8 -10
  3. data/examples/01-basic-redis-pod/manifest.rb +3 -3
  4. data/examples/02-manifest-with-middleware/manifest.rb +37 -0
  5. data/examples/02-manifest-with-middleware/middleware/labels.rb +4 -0
  6. data/examples/02-manifest-with-middleware/middleware/namespace.rb +4 -0
  7. data/examples/02-manifest-with-middleware/templates/config_map.rb +13 -0
  8. data/examples/02-manifest-with-middleware/templates/deployment.rb +59 -0
  9. data/examples/02-manifest-with-middleware/templates/horizontal_pod_autoscaler.rb +30 -0
  10. data/examples/02-manifest-with-middleware/templates/ingress.rb +38 -0
  11. data/examples/02-manifest-with-middleware/templates/service.rb +12 -0
  12. data/examples/{version2 → 03-app-with-database}/demo.rb +2 -2
  13. data/examples/{version2 → 03-app-with-database}/postgresql.rb +4 -4
  14. data/examples/{version2 → 03-app-with-database}/ruby_on_rails.rb +1 -1
  15. data/kube_cluster.gemspec +1 -1
  16. data/lib/kube/cluster/manifest.rb +13 -64
  17. data/lib/kube/cluster/middleware/annotations.rb +32 -0
  18. data/lib/kube/cluster/middleware/hpa_for_deployment.rb +111 -0
  19. data/lib/kube/cluster/{manifest/middleware → middleware}/ingress_for_service.rb +36 -34
  20. data/lib/kube/cluster/middleware/labels.rb +59 -0
  21. data/lib/kube/cluster/middleware/namespace.rb +31 -0
  22. data/lib/kube/cluster/middleware/pod_anti_affinity.rb +61 -0
  23. data/lib/kube/cluster/middleware/resource_preset.rb +64 -0
  24. data/lib/kube/cluster/middleware/security_context.rb +84 -0
  25. data/lib/kube/cluster/middleware/service_for_deployment.rb +71 -0
  26. data/lib/kube/cluster/middleware/stack.rb +43 -0
  27. data/lib/kube/cluster/middleware.rb +69 -0
  28. data/lib/kube/cluster/resource.rb +78 -0
  29. data/lib/kube/cluster/version.rb +1 -1
  30. data/lib/kube/cluster.rb +21 -0
  31. metadata +27 -21
  32. data/examples/database/manifest.rb +0 -238
  33. data/examples/web-app/manifest.rb +0 -215
  34. data/lib/kube/cluster/manifest/middleware/annotations.rb +0 -32
  35. data/lib/kube/cluster/manifest/middleware/hpa_for_deployment.rb +0 -109
  36. data/lib/kube/cluster/manifest/middleware/labels.rb +0 -59
  37. data/lib/kube/cluster/manifest/middleware/namespace.rb +0 -31
  38. data/lib/kube/cluster/manifest/middleware/pod_anti_affinity.rb +0 -61
  39. data/lib/kube/cluster/manifest/middleware/resource_preset.rb +0 -64
  40. data/lib/kube/cluster/manifest/middleware/security_context.rb +0 -84
  41. data/lib/kube/cluster/manifest/middleware/service_for_deployment.rb +0 -69
  42. data/lib/kube/cluster/manifest/middleware.rb +0 -178
  43. data/lib/kube/cluster/manifest/stack.rb +0 -56
  44. /data/examples/{version2 → 03-app-with-database}/helpers.rb +0 -0
  45. /data/examples/{version2 → 03-app-with-database}/my_app.rb +0 -0
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: f6b2fcc6e1de8c59841b080e8150aa249d84fc064f4eab2e981be9ded9dc2290
4
- data.tar.gz: 1a55810edfb5809b44699e2a17984b9f0311257aba52796da6b74e293ce3a9d0
3
+ metadata.gz: 5659907f3883730ad9d55c7e73bf664d0dd3047a9c9ce38eb15f6ee86e13dc6a
4
+ data.tar.gz: 4776a870e33f4278bc992f8653ae6b7d22d3c8dcf5d1b0eff99dd54b6407ba46
5
5
  SHA512:
6
- metadata.gz: b64081cec15ce53b3a80effecba8f2ae3ec43ee58d1dc483196a821f37a4d6aa0aabbdd3d21e5df7eb5208cbd8bdbcf1aefbf82a9911733795b5ad4689c2c62e
7
- data.tar.gz: ef610d9b33d6907a92f69c077080a8f26ffd6a1e2f2e391b9f54564187b6799af2e275b2f9f6cb3b056f3f9b3952b964921692a2d4456ee5ee7af229b209ac0c
6
+ metadata.gz: 7c4c0cb0dd1695a17aa2ef5a2ed32f27f8ae41ab3c00e1232f230cacb5cc253f769446f8019035149b5554eba1aa0a6c5f30d81e9a61a29b46fa954252cf9d13
7
+ data.tar.gz: b74836f42d1acd37c9e25d9a7fb885910c1f52642615ee8cb29a489f47b04a81e44495e41b244c711c1e7a8ecdc4208e0e4dc23f27ee6994df9e421c073558be
data/Gemfile.lock CHANGED
@@ -1,17 +1,16 @@
1
1
  PATH
2
2
  remote: .
3
3
  specs:
4
- kube_cluster (0.2.0)
4
+ kube_cluster (0.3.0)
5
5
  kube_kit (> 0)
6
6
  kube_kubectl (~> 2.0.0)
7
- kube_schema (~> 1.2.0)
7
+ kube_schema (~> 1.3.0)
8
8
 
9
9
  GEM
10
10
  remote: https://rubygems.org/
11
11
  specs:
12
12
  ast (2.4.3)
13
13
  bigdecimal (4.1.2)
14
- black_hole_struct (0.1.3)
15
14
  date (3.5.1)
16
15
  debug (1.11.1)
17
16
  irb (~> 1.10)
@@ -24,23 +23,22 @@ GEM
24
23
  prism (>= 1.3.0)
25
24
  rdoc (>= 4.0.0)
26
25
  reline (>= 0.4.2)
27
- json (2.19.3)
26
+ json (2.19.4)
28
27
  json_schemer (2.5.0)
29
28
  bigdecimal
30
29
  hana (~> 1.3)
31
30
  regexp_parser (~> 2.0)
32
31
  simpleidn (~> 0.2)
33
32
  kube_kit (0.2.0)
34
- kube_kubectl (2.0.1)
33
+ kube_kubectl (2.0.2)
35
34
  debug (~> 1.11)
36
35
  json_schemer (~> 2.5)
37
36
  rubyshell (~> 1.5)
38
37
  shellwords (~> 0.2.2)
39
38
  string_builder (~> 1.2.0)
40
- kube_schema (1.2.1)
41
- black_hole_struct (~> 0.1)
42
- json_schemer (~> 2.5)
43
- rubyshell (~> 1.5)
39
+ kube_schema (1.3.1)
40
+ json_schemer (~> 2.5.0)
41
+ rubyshell (~> 1.5.0)
44
42
  language_server-protocol (3.17.0.5)
45
43
  lint_roller (1.1.0)
46
44
  minitest (5.27.0)
@@ -83,7 +81,7 @@ GEM
83
81
  rubyshell (1.5.0)
84
82
  shellwords (0.2.2)
85
83
  simpleidn (0.2.3)
86
- string_builder (1.2.0)
84
+ string_builder (1.2.1)
87
85
  stringio (3.2.0)
88
86
  tsort (0.2.0)
89
87
  unicode-display_width (3.2.0)
@@ -1,9 +1,9 @@
1
1
  require "bundler/setup"
2
- require "kube/schema"
2
+ require "kube/cluster"
3
3
 
4
- class RedisPod < Kube::Schema['Pod']
4
+ class RedisPod < Kube::Cluster['Pod']
5
5
  def initialize(container_name: 'my-redis-container', **options, &block)
6
- super {
6
+ super(**options) {
7
7
  spec.containers = [
8
8
  {
9
9
  name: container_name,
@@ -0,0 +1,37 @@
1
+ require "bundler/setup"
2
+ require "kube/cluster"
3
+
4
+ APP_NAME = "my-app"
5
+ FULLNAME = "my-app"
6
+ IMAGE = "my-app:latest"
7
+ MATCH_LABELS = { app: APP_NAME }
8
+ STANDARD_LABELS = { app: APP_NAME, version: "1.0" }
9
+ RESOURCES = { requests: { cpu: "100m", memory: "128Mi" }, limits: { cpu: "500m", memory: "256Mi" } }
10
+
11
+ require_relative 'templates/config_map'
12
+ require_relative 'templates/deployment'
13
+ require_relative 'templates/ingress'
14
+ require_relative 'templates/service'
15
+ require_relative 'templates/horizontal_pod_autoscaler'
16
+
17
+ require_relative 'middleware/labels'
18
+ require_relative 'middleware/namespace'
19
+
20
+ namespace = APP_NAME
21
+
22
+ manifest = Kube::Cluster::Manifest.new(
23
+ ConfigMap.new(namespace: namespace),
24
+ Deployment.new(namespace: namespace),
25
+ Ingress.new(namespace: namespace),
26
+ Service.new(namespace: namespace),
27
+ HorizontalPodAutoscaler.new(namespace: namespace),
28
+ )
29
+
30
+ stack = Kube::Cluster::Middleware::Stack.new do
31
+ use Middleware::Namespace
32
+ use Middleware::Labels
33
+ end
34
+
35
+ stack.call(manifest)
36
+
37
+ puts manifest.to_yaml
@@ -0,0 +1,4 @@
1
+ module Middleware
2
+ class Labels < Kube::Cluster::Middleware
3
+ end
4
+ end
@@ -0,0 +1,4 @@
1
+ module Middleware
2
+ class Namespace < Kube::Cluster::Middleware
3
+ end
4
+ end
@@ -0,0 +1,13 @@
1
+ class ConfigMap < Kube::Cluster["ConfigMap"]
2
+ def initialize(namespace:)
3
+ super {
4
+ metadata.name = "#{namespace}-config"
5
+ spec.data = {
6
+ "RAILS_ENV": "production",
7
+ "LOG_LEVEL": "info",
8
+ "WORKERS": "4",
9
+ "PORT": "3000",
10
+ }
11
+ }
12
+ end
13
+ end
@@ -0,0 +1,59 @@
1
+ class Deployment < Kube::Cluster["Deployment"]
2
+ def initialize(namespace:)
3
+ super {
4
+ metadata.name = namespace
5
+
6
+ spec.replicas = 3
7
+ spec.selector.matchLabels = MATCH_LABELS
8
+
9
+ spec.template.metadata.labels = STANDARD_LABELS
10
+ spec.template.metadata.annotations = {
11
+ # Checksum pattern from _utils.tpl -- triggers rolling restart on config change
12
+ "checksum/config": "{{ sha256sum of configmap data }}",
13
+ }
14
+
15
+ spec.template.spec.containers = [
16
+ {
17
+ name: APP_NAME,
18
+ image: IMAGE,
19
+ ports: [{ name: "http", containerPort: 3000, protocol: "TCP" }],
20
+ resources: RESOURCES,
21
+ env: [
22
+ { name: "PORT", value: "3000" },
23
+ ],
24
+ envFrom: [
25
+ { configMapRef: { name: "#{FULLNAME}-config" } },
26
+ ],
27
+ livenessProbe: {
28
+ httpGet: { path: "/healthz", port: "http" },
29
+ initialDelaySeconds: 15,
30
+ periodSeconds: 10,
31
+ },
32
+ readinessProbe: {
33
+ httpGet: { path: "/readyz", port: "http" },
34
+ initialDelaySeconds: 5,
35
+ periodSeconds: 5,
36
+ },
37
+ },
38
+ ]
39
+
40
+ # Pod anti-affinity (from _affinities.tpl)
41
+ # Soft anti-affinity: prefer spreading pods across nodes but don't enforce it
42
+ spec.template.spec.affinity = {
43
+ podAntiAffinity: {
44
+ preferredDuringSchedulingIgnoredDuringExecution: [
45
+ {
46
+ weight: 1,
47
+ podAffinityTerm: {
48
+ labelSelector: {
49
+ matchLabels: MATCH_LABELS,
50
+ },
51
+ topologyKey: "kubernetes.io/hostname",
52
+ },
53
+ },
54
+ ],
55
+ },
56
+ }
57
+ }
58
+ end
59
+ end
@@ -0,0 +1,30 @@
1
+ class HorizontalPodAutoscaler < Kube::Cluster["HorizontalPodAutoscaler"]
2
+ def initialize(namespace:)
3
+ super {
4
+ metadata.name = namespace
5
+
6
+ spec.scaleTargetRef.apiVersion = "apps/v1"
7
+ spec.scaleTargetRef.kind = "Deployment"
8
+ spec.scaleTargetRef.name = namespace
9
+
10
+ spec.minReplicas = 3
11
+ spec.maxReplicas = 10
12
+ spec.metrics = [
13
+ {
14
+ type: "Resource",
15
+ resource: {
16
+ name: "cpu",
17
+ target: { type: "Utilization", averageUtilization: 75 },
18
+ },
19
+ },
20
+ {
21
+ type: "Resource",
22
+ resource: {
23
+ name: "memory",
24
+ target: { type: "Utilization", averageUtilization: 80 },
25
+ },
26
+ },
27
+ ]
28
+ }
29
+ end
30
+ end
@@ -0,0 +1,38 @@
1
+ class Ingress < Kube::Cluster["Ingress"]
2
+ def initialize(namespace:)
3
+ super {
4
+ metadata.name = namespace
5
+ metadata.annotations = {
6
+ "cert-manager.io/cluster-issuer": "letsencrypt-prod",
7
+ "nginx.ingress.kubernetes.io/ssl-redirect": "true",
8
+ }
9
+
10
+ spec.ingressClassName = "nginx"
11
+ spec.tls = [
12
+ {
13
+ hosts: ["app.example.com"],
14
+ secretName: "#{namespace}-tls",
15
+ },
16
+ ]
17
+ spec.rules = [
18
+ {
19
+ host: "app.example.com",
20
+ http: {
21
+ paths: [
22
+ {
23
+ path: "/",
24
+ pathType: "Prefix",
25
+ backend: {
26
+ service: {
27
+ name: FULLNAME,
28
+ port: { name: "http" },
29
+ },
30
+ },
31
+ },
32
+ ],
33
+ },
34
+ },
35
+ ]
36
+ }
37
+ end
38
+ end
@@ -0,0 +1,12 @@
1
+ class Service < Kube::Cluster["Service"]
2
+ def initialize(namespace:)
3
+ super {
4
+ metadata.name = namespace
5
+
6
+ spec.selector = MATCH_LABELS
7
+ spec.ports = [
8
+ { name: "http", port: 80, targetPort: "http", protocol: "TCP" },
9
+ ]
10
+ }
11
+ end
12
+ end
@@ -44,12 +44,12 @@ app = MyApp.new("example.com", size: :small) do |m|
44
44
  # Middleware injects: resource limits, security context, anti-affinity, labels
45
45
 
46
46
  [
47
- Kube::Schema["Namespace"].new {
47
+ Kube::Cluster["Namespace"].new {
48
48
  metadata.name = ns
49
49
  metadata.labels = labels
50
50
  },
51
51
 
52
- Kube::Schema["ConfigMap"].new {
52
+ Kube::Cluster["ConfigMap"].new {
53
53
  metadata.name = "#{name}-config"
54
54
  metadata.namespace = ns
55
55
  metadata.labels = labels
@@ -10,7 +10,7 @@ class Postgresql < Kube::Cluster::Manifest
10
10
  self << Secret.new
11
11
  end
12
12
 
13
- class StatefulSet < Kube::Schema["StatefulSet"]
13
+ class StatefulSet < Kube::Cluster["StatefulSet"]
14
14
  metadata.name = db_name
15
15
  metadata.namespace = db_ns
16
16
  metadata.labels = db_labels
@@ -55,12 +55,12 @@ class Postgresql < Kube::Cluster::Manifest
55
55
  ]
56
56
  end
57
57
 
58
- class Namespace < Kube::Schema["Namespace"]
58
+ class Namespace < Kube::Cluster["Namespace"]
59
59
  metadata.name = db_ns
60
60
  metadata.labels = db_labels.reject { |k, _| k == :"app.kubernetes.io/component" }
61
61
  end
62
62
 
63
- class Secret < Kube::Schema["Secret"]
63
+ class Secret < Kube::Cluster["Secret"]
64
64
  metadata.name = db_name
65
65
  metadata.namespace = db_ns
66
66
  metadata.labels = db_labels
@@ -70,7 +70,7 @@ class Postgresql < Kube::Cluster::Manifest
70
70
 
71
71
  # Headless service for StatefulSet DNS — explicit because the
72
72
  # middleware-generated Service is a regular ClusterIP service.
73
- class Service < Kube::Schema["Service"]
73
+ class Service < Kube::Cluster["Service"]
74
74
  metadata.name = "#{db_name}-headless"
75
75
  metadata.namespace = db_ns
76
76
  metadata.labels = db_labels
@@ -1,4 +1,4 @@
1
- class RubyOnRails < Kube::Schema["Deployment"]
1
+ class RubyOnRails < Kube::Cluster["Deployment"]
2
2
  default do
3
3
  metadata.name = name
4
4
  metadata.namespace = ns
data/kube_cluster.gemspec CHANGED
@@ -32,7 +32,7 @@ Gem::Specification.new do |spec|
32
32
  spec.add_development_dependency "rake", "~> 13.0"
33
33
  spec.add_development_dependency "rubocop", "~> 1.21"
34
34
 
35
- spec.add_dependency "kube_schema", "~> 1.2.0"
35
+ spec.add_dependency "kube_schema", "~> 1.3.0"
36
36
  spec.add_dependency "kube_kit", "> 0"
37
37
  spec.add_dependency "kube_kubectl", "~> 2.0.0"
38
38
  end
@@ -1,76 +1,25 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require_relative "manifest/stack"
4
- require_relative "manifest/middleware"
5
- require_relative "manifest/middleware/namespace"
6
- require_relative "manifest/middleware/labels"
7
- require_relative "manifest/middleware/annotations"
8
- require_relative "manifest/middleware/resource_preset"
9
- require_relative "manifest/middleware/security_context"
10
- require_relative "manifest/middleware/pod_anti_affinity"
11
- require_relative "manifest/middleware/service_for_deployment"
12
- require_relative "manifest/middleware/ingress_for_service"
13
- require_relative "manifest/middleware/hpa_for_deployment"
14
-
15
3
  module Kube
16
4
  module Cluster
17
- # A Manifest subclass that runs resources through a middleware stack
18
- # on enumeration. Manifests represent files — resources pass through
19
- # middleware before rendering or saving.
5
+ # A flat, ordered collection of Kubernetes resources.
6
+ #
7
+ # Manifest is a pure resource collection. Middleware is applied
8
+ # separately via Kube::Cluster::Middleware::Stack.
20
9
  #
21
- # class MyApp < Kube::Cluster::Manifest
22
- # stack do
23
- # use Middleware::Namespace, "production"
24
- # use Middleware::Labels, app: "web-app"
25
- # use Middleware::ResourcePreset
26
- # end
10
+ # manifest = Kube::Cluster::Manifest.new
11
+ # manifest << Kube::Cluster["Deployment"].new { ... }
12
+ #
13
+ # stack = Kube::Cluster::Middleware::Stack.new do
14
+ # use Middleware::Namespace, "production"
15
+ # use Middleware::Labels, app: "web-app"
27
16
  # end
28
17
  #
29
- # app = MyApp.new
30
- # app << Kube::Schema["Deployment"].new { ... }
31
- # app.to_yaml # resources have been transformed by the stack
18
+ # stack.call(manifest)
19
+ # manifest.to_yaml
32
20
  #
33
21
  class Manifest < Kube::Schema::Manifest
34
- # Declare a middleware stack at the class level.
35
- #
36
- # stack do
37
- # use Middleware::ResourcePreset
38
- # use Middleware::SecurityContext
39
- # end
40
- #
41
- def self.stack(&block)
42
- @stack = Stack.new(&block)
43
- end
44
-
45
- # Enumerate resources after passing them through the middleware
46
- # stack. The entire manifest is passed to the stack so that
47
- # generative middleware can introduce new resources that
48
- # subsequent stages will see and process.
49
- #
50
- # Every method that reads the manifest (to_yaml, to_a, map,
51
- # select, etc.) goes through here.
52
- def each(&block)
53
- return enum_for(:each) unless block
54
-
55
- stack = self.class.instance_variable_get(:@stack)
56
- if stack
57
- stack.call(@resources).each(&block)
58
- else
59
- @resources.each(&block)
60
- end
61
- end
62
-
63
- # Override to_yaml so it renders through the middleware stack.
64
- # The parent class accesses @resources directly, bypassing each.
65
- def to_yaml
66
- map { |r| r.to_yaml }.join("")
67
- end
68
-
69
- # Override to_a so it returns middleware-processed resources.
70
- # The parent class returns @resources.dup directly.
71
- def to_a
72
- map(&:itself)
73
- end
22
+ attr_reader :resources
74
23
  end
75
24
  end
76
25
  end
@@ -0,0 +1,32 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Kube
4
+ module Cluster
5
+ class Middleware
6
+ # Merges annotations into +metadata.annotations+ on every resource.
7
+ # Existing annotations are preserved; the supplied annotations act
8
+ # as defaults that can be overridden per-resource.
9
+ #
10
+ # stack do
11
+ # use Middleware::Annotations,
12
+ # "prometheus.io/scrape": "true",
13
+ # "prometheus.io/port": "9090"
14
+ # end
15
+ #
16
+ class Annotations < Middleware
17
+ def initialize(**annotations)
18
+ @annotations = annotations.transform_keys(&:to_sym).transform_values(&:to_s)
19
+ end
20
+
21
+ def call(manifest)
22
+ manifest.resources.map! do |resource|
23
+ h = resource.to_h
24
+ h[:metadata] ||= {}
25
+ h[:metadata][:annotations] = @annotations.merge(h[:metadata][:annotations] || {})
26
+ resource.rebuild(h)
27
+ end
28
+ end
29
+ end
30
+ end
31
+ end
32
+ end
@@ -0,0 +1,111 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Kube
4
+ module Cluster
5
+ class Middleware
6
+ # Generates a HorizontalPodAutoscaler for every pod-bearing
7
+ # resource that carries the +app.kubernetes.io/autoscale+ label.
8
+ #
9
+ # The label value encodes the min and max replicas as "min-max":
10
+ #
11
+ # metadata.labels = { "app.kubernetes.io/autoscale": "1-5" }
12
+ #
13
+ # Options:
14
+ # cpu: — target CPU utilization percentage (default: 75)
15
+ # memory: — target memory utilization percentage (default: 80)
16
+ #
17
+ # stack do
18
+ # use Middleware::HPAForDeployment
19
+ # use Middleware::HPAForDeployment, cpu: 60, memory: 70
20
+ # end
21
+ #
22
+ class HPAForDeployment < Middleware
23
+ LABEL = :"app.kubernetes.io/autoscale"
24
+
25
+ def initialize(cpu: 75, memory: 80)
26
+ @cpu = cpu
27
+ @memory = memory
28
+ end
29
+
30
+ def call(manifest)
31
+ generated = []
32
+
33
+ manifest.resources.each do |resource|
34
+ next unless resource.pod_bearing?
35
+
36
+ value = resource.label(LABEL)
37
+ next unless value
38
+
39
+ min, max = parse_range(value)
40
+
41
+ h = resource.to_h
42
+ name = h.dig(:metadata, :name)
43
+ namespace = h.dig(:metadata, :namespace)
44
+ labels = h.dig(:metadata, :labels) || {}
45
+ api_version = h[:apiVersion] || "apps/v1"
46
+ resource_kind = resource.kind
47
+
48
+ # Capture ivars as locals — the block runs via instance_exec
49
+ # on a BlackHoleStruct, so @ivars would resolve on the BHS.
50
+ cpu_target = @cpu
51
+ memory_target = @memory
52
+
53
+ generated << Kube::Cluster["HorizontalPodAutoscaler"].new {
54
+ metadata.name = name
55
+ metadata.namespace = namespace if namespace
56
+ metadata.labels = labels.reject { |k, _| k == LABEL }
57
+
58
+ spec.scaleTargetRef = {
59
+ apiVersion: api_version,
60
+ kind: resource_kind,
61
+ name: name,
62
+ }
63
+ spec.minReplicas = min
64
+ spec.maxReplicas = max
65
+ spec.metrics = [
66
+ {
67
+ type: "Resource",
68
+ resource: {
69
+ name: "cpu",
70
+ target: { type: "Utilization", averageUtilization: cpu_target },
71
+ },
72
+ },
73
+ {
74
+ type: "Resource",
75
+ resource: {
76
+ name: "memory",
77
+ target: { type: "Utilization", averageUtilization: memory_target },
78
+ },
79
+ },
80
+ ]
81
+ }
82
+ end
83
+
84
+ manifest.resources.concat(generated)
85
+ end
86
+
87
+ private
88
+
89
+ def parse_range(value)
90
+ parts = value.to_s.split("-", 2)
91
+
92
+ unless parts.length == 2
93
+ raise ArgumentError,
94
+ "Invalid autoscale label: #{value.inspect}. Expected format: \"min-max\" (e.g. \"1-5\")"
95
+ end
96
+
97
+ min = Integer(parts[0])
98
+ max = Integer(parts[1])
99
+
100
+ unless min > 0 && max >= min
101
+ raise ArgumentError,
102
+ "Invalid autoscale range: min=#{min}, max=#{max}. " \
103
+ "min must be > 0 and max must be >= min."
104
+ end
105
+
106
+ [min, max]
107
+ end
108
+ end
109
+ end
110
+ end
111
+ end