kube_cluster 0.3.7 → 0.3.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Gemfile.lock +5 -5
- data/README.md +133 -10
- data/Rakefile +7 -0
- data/bin/test +8 -6
- data/kube_cluster.gemspec +1 -1
- data/lib/kube/cluster/manifest.rb +238 -0
- data/lib/kube/cluster/middleware/annotations.rb +69 -0
- data/lib/kube/cluster/middleware/hpa_for_deployment.rb +182 -0
- data/lib/kube/cluster/middleware/ingress_for_service.rb +127 -0
- data/lib/kube/cluster/middleware/labels.rb +96 -0
- data/lib/kube/cluster/middleware/namespace.rb +81 -0
- data/lib/kube/cluster/middleware/pod_anti_affinity.rb +137 -0
- data/lib/kube/cluster/middleware/resource_preset.rb +188 -0
- data/lib/kube/cluster/middleware/security_context.rb +170 -0
- data/lib/kube/cluster/middleware/service_for_deployment.rb +167 -0
- data/lib/kube/cluster/resource/dirty_tracking.rb +625 -0
- data/lib/kube/cluster/version.rb +1 -1
- data/lib/kube/cluster.rb +11 -0
- data/lib/kube/helm/chart.rb +410 -0
- data/lib/kube/helm/endpoint.rb +86 -0
- data/lib/kube/helm/repo.rb +203 -0
- metadata +4 -3
|
@@ -1,5 +1,10 @@
|
|
|
1
1
|
# frozen_string_literal: true
|
|
2
2
|
|
|
3
|
+
if __FILE__ == $0
|
|
4
|
+
require "bundler/setup"
|
|
5
|
+
require "kube/cluster"
|
|
6
|
+
end
|
|
7
|
+
|
|
3
8
|
module Kube
|
|
4
9
|
module Cluster
|
|
5
10
|
class Resource < Kube::Schema::Resource
|
|
@@ -111,3 +116,623 @@ module Kube
|
|
|
111
116
|
end
|
|
112
117
|
end
|
|
113
118
|
end
|
|
119
|
+
|
|
120
|
+
if __FILE__ == $0
|
|
121
|
+
require "minitest/autorun"
|
|
122
|
+
require "json"
|
|
123
|
+
|
|
124
|
+
# ---------------------------------------------------------------------------
|
|
125
|
+
# Fake ctl that records every command and returns canned responses.
|
|
126
|
+
# The test wires this into the cluster → connection → ctl chain so that
|
|
127
|
+
# Persistence#kubectl goes through it without touching a real cluster.
|
|
128
|
+
# ---------------------------------------------------------------------------
|
|
129
|
+
# Records every kubectl command string it is asked to run and answers with
# canned responses. Note: a stub keyed on +substring+ answers every matching
# command until replaced, not only the next one.
class FakeCtl
  attr_reader :commands

  def initialize
    @commands = []
    @responses = {}
  end

  # Register a canned response for any command containing +substring+.
  def stub_response(substring, response)
    @responses[substring] = response
  end

  # Record the command, then return the first stub whose key it contains,
  # falling back to an empty string when nothing matches.
  def run(string)
    @commands.push(string)
    hit = @responses.find { |substring, _| string.include?(substring) }
    hit ? hit.last : ""
  end
end
|
|
154
|
+
|
|
155
|
+
# ---------------------------------------------------------------------------
|
|
156
|
+
# Minimal cluster double that provides .connection.ctl
|
|
157
|
+
# ---------------------------------------------------------------------------
|
|
158
|
+
# Connection double: holds whatever ctl object it was handed and exposes it
# read-only, which is all Persistence#kubectl needs.
class FakeConnection
  attr_reader :ctl

  def initialize(ctl)
    @ctl = ctl
  end
end
|
|
165
|
+
|
|
166
|
+
# Cluster double: satisfies the cluster → connection → ctl chain by wrapping
# the given ctl in a FakeConnection.
class FakeCluster
  attr_reader :connection

  def initialize(ctl)
    @connection = FakeConnection.new(ctl)
  end
end
|
|
173
|
+
|
|
174
|
+
# ---------------------------------------------------------------------------
|
|
175
|
+
# Helper to build a resource wired to a fake cluster.
|
|
176
|
+
# ---------------------------------------------------------------------------
|
|
177
|
+
# Builders shared by the integration tests below.
module ResourceHelper
  # Build a ConfigMap resource wired to a FakeCluster; returns the resource
  # together with the FakeCtl so tests can stub responses and inspect commands.
  def build_resource(hash = {})
    fake_ctl = FakeCtl.new
    fake_cluster = FakeCluster.new(fake_ctl)
    attrs = hash.merge(kind: "ConfigMap", cluster: fake_cluster)
    [Kube::Cluster["ConfigMap"].new(attrs), fake_ctl]
  end

  # Simulate a kubectl JSON response: the server echoes the resource hash,
  # optionally decorated with server-added fields from +extra+.
  def server_state(resource_hash, extra = {})
    JSON.generate(stringify_keys(resource_hash.merge(extra)))
  end

  private

  # Recursively convert symbol keys to strings, as real kubectl output has.
  def stringify_keys(obj)
    if obj.is_a?(Hash)
      obj.to_h { |k, v| [k.to_s, stringify_keys(v)] }
    elsif obj.is_a?(Array)
      obj.map { |v| stringify_keys(v) }
    else
      obj
    end
  end
end
|
|
201
|
+
|
|
202
|
+
# ===========================================================================
|
|
203
|
+
# Integration tests — exercises DirtyTracking through the Persistence layer,
|
|
204
|
+
# driving the full Resource → Persistence → kubectl → DirtyTracking cycle.
|
|
205
|
+
# ===========================================================================
|
|
206
|
+
class DirtyTrackingIntegrationTest < Minitest::Test
|
|
207
|
+
include ResourceHelper
|
|
208
|
+
|
|
209
|
+
# -------------------------------------------------------------------------
|
|
210
|
+
# Full lifecycle: apply → mutate → detect changes → patch → clean
|
|
211
|
+
# -------------------------------------------------------------------------
|
|
212
|
+
|
|
213
|
+
def test_full_apply_mutate_patch_lifecycle
  res, fake_ctl = build_resource(
    metadata: { name: "app-config", namespace: "production" },
    spec: { key: "original" }
  )

  # The reload that follows apply receives an echo of what was sent.
  fake_ctl.stub_response("get", server_state(
    metadata: { name: "app-config", namespace: "production", resourceVersion: "100" },
    spec: { key: "original" }
  ))

  res.apply

  # apply triggers a reload, which snapshots — so the resource is clean.
  refute res.changed?, "resource should be clean after apply + reload"
  assert_equal({}, res.changes)
  assert_equal [], res.changed

  # A local mutation dirties the resource.
  res.instance_variable_get(:@data).spec.key = "updated"
  assert res.changed?

  # The reload after patch sees the updated value.
  fake_ctl.stub_response("get", server_state(
    metadata: { name: "app-config", namespace: "production", resourceVersion: "101" },
    spec: { key: "updated" }
  ))

  assert_equal true, res.patch

  # Patch snapshots again, leaving the resource clean.
  refute res.changed?
  assert_equal({}, res.changes)
end
|
|
248
|
+
|
|
249
|
+
# -------------------------------------------------------------------------
|
|
250
|
+
# Patch returns false when nothing changed
|
|
251
|
+
# -------------------------------------------------------------------------
|
|
252
|
+
|
|
253
|
+
def test_patch_returns_false_when_clean
  res, fake_ctl = build_resource(metadata: { name: "app-config", namespace: "default" }, spec: { key: "value" })

  fake_ctl.stub_response("get", server_state(
    metadata: { name: "app-config", namespace: "default" }, spec: { key: "value" }
  ))

  assert_equal false, res.patch, "patch should return false when nothing changed"

  # A clean resource must not generate any kubectl patch invocation.
  patch_commands = fake_ctl.commands.select { |cmd| cmd.include?("patch") }
  assert_empty patch_commands, "no kubectl patch should be issued when resource is clean"
end
|
|
267
|
+
|
|
268
|
+
# -------------------------------------------------------------------------
|
|
269
|
+
# Patch sends only the diff, not the full resource
|
|
270
|
+
# -------------------------------------------------------------------------
|
|
271
|
+
|
|
272
|
+
def test_patch_sends_only_changed_fields
  res, fake_ctl = build_resource(
    metadata: { name: "my-config", namespace: "staging" },
    spec: { db_host: "old-db.internal", db_port: "5432", cache_ttl: "300" }
  )

  # Touch exactly one leaf.
  res.instance_variable_get(:@data).spec.db_host = "new-db.internal"

  fake_ctl.stub_response("get", server_state(
    metadata: { name: "my-config", namespace: "staging" },
    spec: { db_host: "new-db.internal", db_port: "5432", cache_ttl: "300" }
  ))

  res.patch

  patch_cmd = fake_ctl.commands.find { |cmd| cmd.include?("patch") }
  refute_nil patch_cmd, "a kubectl patch command should have been issued"

  # The JSON payload is everything after the "-p " flag.
  payload = JSON.parse(patch_cmd[(patch_cmd.index("-p ") + 3)..])

  assert payload.key?("spec"), "patch payload should include changed subtree"
  refute payload.key?("metadata"), "patch payload should not include unchanged top-level keys"
end
|
|
300
|
+
|
|
301
|
+
# -------------------------------------------------------------------------
|
|
302
|
+
# Reload resets dirty state from server response
|
|
303
|
+
# -------------------------------------------------------------------------
|
|
304
|
+
|
|
305
|
+
def test_reload_resets_dirty_state
  res, fake_ctl = build_resource(metadata: { name: "my-config", namespace: "default" }, spec: { key: "v1" })

  # Dirty the resource locally while the server keeps the original value.
  res.instance_variable_get(:@data).spec.key = "local-change"
  assert res.changed?

  fake_ctl.stub_response("get", server_state(
    metadata: { name: "my-config", namespace: "default" }, spec: { key: "v1" }
  ))

  res.reload

  # Reload discards local edits and snapshots the server state.
  refute res.changed?
  assert_equal "v1", res.to_h[:spec][:key]
end

def test_reload_picks_up_server_side_changes
  res, fake_ctl = build_resource(metadata: { name: "my-config", namespace: "default" }, spec: { key: "v1" })

  # Someone else mutated the object on the server.
  fake_ctl.stub_response("get", server_state(
    metadata: { name: "my-config", namespace: "default", resourceVersion: "200" },
    spec: { key: "server-updated" }
  ))

  res.reload

  # The resource reflects the server state and is clean.
  refute res.changed?
  assert_equal "server-updated", res.to_h[:spec][:key]
end
|
|
339
|
+
|
|
340
|
+
# -------------------------------------------------------------------------
|
|
341
|
+
# Apply snapshots after the server round-trip
|
|
342
|
+
# -------------------------------------------------------------------------
|
|
343
|
+
|
|
344
|
+
def test_apply_snapshots_server_response
  res, fake_ctl = build_resource(metadata: { name: "my-config" }, spec: { key: "v1" })

  # On apply the server decorates the object with extra metadata.
  fake_ctl.stub_response("get", server_state(
    metadata: { name: "my-config", resourceVersion: "1", uid: "abc-123" },
    spec: { key: "v1" }
  ))

  res.apply
  refute res.changed?

  # Because the snapshot captured the server round-trip, a subsequent
  # mutation reports the server's value as the old value.
  res.instance_variable_get(:@data).spec.key = "v2"

  # changes[:spec] is an [old_hash, new_hash] tuple.
  old_spec, new_spec = res.changes[:spec]
  assert_equal "v1", old_spec[:key]
  assert_equal "v2", new_spec[:key]

  # Server-added metadata survived into the resource itself.
  assert res.to_h.key?(:metadata)
end
|
|
370
|
+
|
|
371
|
+
# -------------------------------------------------------------------------
|
|
372
|
+
# Error cases: unpersisted resources
|
|
373
|
+
# -------------------------------------------------------------------------
|
|
374
|
+
|
|
375
|
+
def test_patch_raises_on_unpersisted_resource
  # No metadata.name → the resource counts as unpersisted.
  res, = build_resource(spec: { key: "value" })

  err = assert_raises(Kube::CommandError) { res.patch }
  assert_match(/cannot patch/, err.message)
end

def test_delete_raises_on_unpersisted_resource
  res, = build_resource(spec: { key: "value" })

  err = assert_raises(Kube::CommandError) { res.delete }
  assert_match(/cannot delete/, err.message)
end

def test_reload_raises_on_unpersisted_resource
  res, = build_resource(spec: { key: "value" })

  err = assert_raises(Kube::CommandError) { res.reload }
  assert_match(/cannot reload/, err.message)
end
|
|
396
|
+
|
|
397
|
+
# -------------------------------------------------------------------------
|
|
398
|
+
# Nested mutation flows through patch_data correctly
|
|
399
|
+
# -------------------------------------------------------------------------
|
|
400
|
+
|
|
401
|
+
def test_nested_mutation_produces_nested_patch
  res, = build_resource(
    metadata: { name: "my-config", namespace: "default", labels: { app: "web", tier: "frontend" } }
  )

  # Change a single leaf two levels down.
  res.instance_variable_get(:@data).metadata.labels.tier = "backend"

  diff = res.patch_data
  assert_kind_of Hash, diff[:metadata], "patch_data should nest into metadata"
  assert_kind_of Hash, diff[:metadata][:labels], "patch_data should nest into labels"
  assert_equal ["frontend", "backend"], diff[:metadata][:labels][:tier]

  # Untouched siblings stay out of the diff entirely.
  refute diff[:metadata][:labels].key?(:app), "unchanged label should not appear in patch"
  refute diff.key?(:spec), "unchanged top-level key should not appear in patch"
end

def test_deeply_nested_no_change_produces_empty_patch
  res, = build_resource(
    metadata: { name: "my-config", labels: { app: "web" } }
  )

  assert_equal({}, res.patch_data)
end
|
|
426
|
+
|
|
427
|
+
# -------------------------------------------------------------------------
|
|
428
|
+
# Multiple mutations before patch coalesce into a single diff
|
|
429
|
+
# -------------------------------------------------------------------------
|
|
430
|
+
|
|
431
|
+
def test_multiple_mutations_coalesce_in_single_patch
  res, fake_ctl = build_resource(
    metadata: { name: "my-config", namespace: "default" },
    data: { host: "db-1", port: "5432", pool: "5" }
  )

  settings = res.instance_variable_get(:@data).data
  settings.host = "db-2"
  settings.port = "5433"
  settings.pool = "10"

  fake_ctl.stub_response("get", server_state(
    metadata: { name: "my-config", namespace: "default" },
    data: { host: "db-2", port: "5433", pool: "10" }
  ))

  res.patch

  # All three mutations travel in exactly one kubectl patch.
  patch_commands = fake_ctl.commands.select { |cmd| cmd.include?("patch") }
  assert_equal 1, patch_commands.size

  # deep_diff produces an [old, new] tuple per changed leaf.
  payload = JSON.parse(patch_commands.first.split("-p ").last)
  assert_equal ["db-1", "db-2"], payload["data"]["host"]
  assert_equal ["5432", "5433"], payload["data"]["port"]
  assert_equal ["5", "10"], payload["data"]["pool"]
end
|
|
460
|
+
|
|
461
|
+
# -------------------------------------------------------------------------
|
|
462
|
+
# changes_applied mid-workflow resets the baseline
|
|
463
|
+
# -------------------------------------------------------------------------
|
|
464
|
+
|
|
465
|
+
def test_changes_applied_resets_baseline_without_server_roundtrip
  res, = build_resource(metadata: { name: "my-config" }, spec: { key: "v1" })

  res.instance_variable_get(:@data).spec.key = "v2"
  assert res.changed?
  assert_equal([:spec], res.changed)

  # Accept the pending changes purely locally — no kubectl round-trip.
  res.changes_applied

  refute res.changed?
  assert_equal({}, res.changes)

  # A later mutation is measured against the freshly accepted baseline.
  res.instance_variable_get(:@data).spec.key = "v3"
  assert res.changed?

  diff = res.changes
  if diff[:spec].is_a?(Hash)
    # nested-diff shape: { key: [old, new] } — old must be v2, not v1.
    assert_equal "v2", diff[:spec][:key]&.first, "baseline should be v2 after changes_applied"
  elsif diff[:spec].is_a?(Array)
    # tuple shape: [old_hash, new_hash]
    assert_equal({ spec: [{ key: "v2" }, { key: "v3" }] }, diff)
  end
end

def test_changes_applied_then_patch_sends_only_subsequent_changes
  res, fake_ctl = build_resource(
    metadata: { name: "my-config", namespace: "default" },
    data: { a: "1", b: "2", c: "3" }
  )

  # Wave one: mutate a, then accept it as the new baseline.
  res.instance_variable_get(:@data).data.a = "changed-a"
  res.changes_applied

  # Wave two: only b moves relative to that baseline.
  res.instance_variable_get(:@data).data.b = "changed-b"

  fake_ctl.stub_response("get", server_state(
    metadata: { name: "my-config", namespace: "default" },
    data: { a: "changed-a", b: "changed-b", c: "3" }
  ))

  res.patch

  patch_cmd = fake_ctl.commands.find { |cmd| cmd.include?("patch") }
  payload = JSON.parse(patch_cmd.split("-p ").last)

  # Only b's [old, new] tuple ships; a was already accepted locally.
  assert_equal ["2", "changed-b"], payload["data"]["b"]
  refute payload["data"].key?("a"), "already-accepted change 'a' should not be in patch"
end
|
|
517
|
+
|
|
518
|
+
# -------------------------------------------------------------------------
|
|
519
|
+
# Dynamic attr_changed? tracks through full lifecycle
|
|
520
|
+
# -------------------------------------------------------------------------
|
|
521
|
+
|
|
522
|
+
def test_attr_changed_through_apply_mutate_patch_cycle
  res, fake_ctl = build_resource(metadata: { name: "my-config", namespace: "default" }, spec: { key: "v1" })

  fake_ctl.stub_response("get", server_state(
    metadata: { name: "my-config", namespace: "default" },
    spec: { key: "v1" }
  ))

  res.apply

  refute res.spec_changed?, "spec should not be changed after apply"
  refute res.metadata_changed?, "metadata should not be changed after apply"

  res.instance_variable_get(:@data).spec.key = "v2"

  # Only the mutated top-level key flips its predicate.
  assert res.spec_changed?, "spec should be changed after mutation"
  refute res.metadata_changed?, "metadata should still not be changed"

  fake_ctl.stub_response("get", server_state(
    metadata: { name: "my-config", namespace: "default" },
    spec: { key: "v2" }
  ))

  res.patch

  refute res.spec_changed?, "spec should not be changed after patch"
end

def test_respond_to_for_dynamic_changed_predicates
  res, = build_resource(metadata: { name: "test" })

  # Any *_changed? predicate is answered dynamically…
  assert res.respond_to?(:metadata_changed?)
  assert res.respond_to?(:spec_changed?)
  assert res.respond_to?(:anything_at_all_changed?)
  # …but arbitrary method names are still rejected.
  refute res.respond_to?(:some_random_method)
end
|
|
558
|
+
|
|
559
|
+
# -------------------------------------------------------------------------
|
|
560
|
+
# Snapshot isolation: reload doesn't leak into captured references
|
|
561
|
+
# -------------------------------------------------------------------------
|
|
562
|
+
|
|
563
|
+
def test_reload_does_not_corrupt_previously_captured_changes
  res, fake_ctl = build_resource(metadata: { name: "my-config", namespace: "default" }, spec: { key: "v1" })

  res.instance_variable_get(:@data).spec.key = "v2"

  # Hold on to the diff structures before reloading.
  changes_before = res.changes
  patch_before = res.patch_data

  fake_ctl.stub_response("get", server_state(
    metadata: { name: "my-config", namespace: "default" },
    spec: { key: "v3-from-server" }
  ))

  res.reload

  # The captured structures must be snapshots, not live views into @data.
  assert_equal "v2", extract_nested_value(changes_before, :spec, :key, 1),
    "previously captured changes should not be corrupted by reload"
  assert_equal "v2", extract_nested_value(patch_before, :spec, :key, 1),
    "previously captured patch_data should not be corrupted by reload"
end

def test_snapshot_isolation_across_multiple_changes_applied
  res, = build_resource(metadata: { name: "test" }, data: { counter: "1" })

  res.instance_variable_get(:@data).data.counter = "2"
  first_diff = res.changes

  res.changes_applied

  res.instance_variable_get(:@data).data.counter = "3"
  second_diff = res.changes

  # Diffs taken around changes_applied stay independent of each other.
  assert_equal "1", extract_nested_value(first_diff, :data, :counter, 0)
  assert_equal "2", extract_nested_value(first_diff, :data, :counter, 1)

  assert_equal "2", extract_nested_value(second_diff, :data, :counter, 0)
  assert_equal "3", extract_nested_value(second_diff, :data, :counter, 1)
end
|
|
605
|
+
|
|
606
|
+
# -------------------------------------------------------------------------
|
|
607
|
+
# Edge case: resource with no initial spec data
|
|
608
|
+
# -------------------------------------------------------------------------
|
|
609
|
+
|
|
610
|
+
def test_empty_resource_tracks_all_additions
  res, = build_resource(metadata: { name: "empty-config" })

  # Adding a brand-new subtree counts as a change.
  res.instance_variable_get(:@data).spec.key = "added"

  assert res.changed?
  assert_includes res.changed, :spec
end
|
|
618
|
+
|
|
619
|
+
# -------------------------------------------------------------------------
|
|
620
|
+
# Edge case: patch type parameter is forwarded
|
|
621
|
+
# -------------------------------------------------------------------------
|
|
622
|
+
|
|
623
|
+
def test_patch_forwards_type_parameter
  res, fake_ctl = build_resource(metadata: { name: "my-config", namespace: "default" }, spec: { key: "v1" })

  res.instance_variable_get(:@data).spec.key = "v2"

  fake_ctl.stub_response("get", server_state(
    metadata: { name: "my-config", namespace: "default" },
    spec: { key: "v2" }
  ))

  res.patch(type: "merge")

  patch_cmd = fake_ctl.commands.find { |cmd| cmd.include?("patch") }
  assert_includes patch_cmd, "--type merge", "patch type should be forwarded to kubectl"
end

def test_patch_defaults_to_strategic_type
  res, fake_ctl = build_resource(metadata: { name: "my-config", namespace: "default" }, spec: { key: "v1" })

  res.instance_variable_get(:@data).spec.key = "v2"

  fake_ctl.stub_response("get", server_state(
    metadata: { name: "my-config", namespace: "default" },
    spec: { key: "v2" }
  ))

  res.patch

  # Unspecified type falls back to kubectl's strategic merge.
  patch_cmd = fake_ctl.commands.find { |cmd| cmd.include?("patch") }
  assert_includes patch_cmd, "--type strategic"
end
|
|
654
|
+
|
|
655
|
+
# -------------------------------------------------------------------------
|
|
656
|
+
# Edge case: namespace flags are included correctly
|
|
657
|
+
# -------------------------------------------------------------------------
|
|
658
|
+
|
|
659
|
+
def test_patch_includes_namespace_flags
  res, fake_ctl = build_resource(metadata: { name: "my-config", namespace: "kube-system" }, spec: { key: "v1" })

  res.instance_variable_get(:@data).spec.key = "v2"

  fake_ctl.stub_response("get", server_state(
    metadata: { name: "my-config", namespace: "kube-system" },
    spec: { key: "v2" }
  ))

  res.patch

  # The resource's namespace must be carried onto the kubectl command line.
  patch_cmd = fake_ctl.commands.find { |cmd| cmd.include?("patch") }
  assert_includes patch_cmd, "--namespace kube-system"
end

def test_reload_includes_namespace_flags
  res, fake_ctl = build_resource(metadata: { name: "my-config", namespace: "monitoring" }, spec: { key: "v1" })

  fake_ctl.stub_response("get", server_state(
    metadata: { name: "my-config", namespace: "monitoring" },
    spec: { key: "v1" }
  ))

  res.reload

  get_cmd = fake_ctl.commands.find { |cmd| cmd.include?("get") }
  assert_includes get_cmd, "--namespace monitoring"
end
|
|
688
|
+
|
|
689
|
+
# -------------------------------------------------------------------------
|
|
690
|
+
# Edge case: delete on persisted resource issues command
|
|
691
|
+
# -------------------------------------------------------------------------
|
|
692
|
+
|
|
693
|
+
def test_delete_issues_kubectl_delete
  res, fake_ctl = build_resource(metadata: { name: "my-config", namespace: "default" })

  assert_equal true, res.delete

  delete_cmd = fake_ctl.commands.find { |cmd| cmd.include?("delete") }
  refute_nil delete_cmd
  # Kind, name, and namespace must all appear on the command line.
  assert_includes delete_cmd, "configmap"
  assert_includes delete_cmd, "my-config"
  assert_includes delete_cmd, "--namespace default"
end
|
|
705
|
+
|
|
706
|
+
# -------------------------------------------------------------------------
|
|
707
|
+
# Regression: the original bug — build_changes used `result` instead of `hash`
|
|
708
|
+
# -------------------------------------------------------------------------
|
|
709
|
+
|
|
710
|
+
def test_changes_does_not_raise_name_error
  res, = build_resource(metadata: { name: "my-config" }, spec: { key: "v1" })

  res.instance_variable_get(:@data).spec.key = "v2"

  # With the original bug (build_changes referencing `result` instead of
  # `hash`) this call raised NameError.
  diff = res.changes

  assert_kind_of Hash, diff
  refute diff.empty?
end
|
|
721
|
+
|
|
722
|
+
private
|
|
723
|
+
|
|
724
|
+
# Navigate into nested change structures.
|
|
725
|
+
# changes[:spec] could be [old_hash, new_hash] or a nested diff hash.
|
|
726
|
+
# Dig a single value out of a change structure that may take either shape:
# an [old_hash, new_hash] tuple or a nested diff hash { key: [old, new] }.
# Returns nil when +top_key+ is absent or the shape is unrecognized.
def extract_nested_value(hash, top_key, nested_key, index)
  entry = hash[top_key]
  if entry.is_a?(Array)
    # [old_hash, new_hash] — pick the side, then the key if it is a hash.
    side = entry[index]
    side.is_a?(Hash) ? side[nested_key] : side
  elsif entry.is_a?(Hash)
    # nested diff: { key: [old, new] } — pick the key, then the side.
    inner = entry[nested_key]
    inner.is_a?(Array) ? inner[index] : inner
  end
end
|
|
737
|
+
end
|
|
738
|
+
end
|
data/lib/kube/cluster/version.rb
CHANGED
data/lib/kube/cluster.rb
CHANGED