kube_schema 1.3.1 → 1.3.2

@@ -0,0 +1,659 @@
+ #!/usr/bin/env ruby
+ # frozen_string_literal: true
+
+ # Thorough example exercising vCluster Platform (management.loft.sh) schemas.
+ #
+ # Demonstrates:
+ # - Creating a multi-tenant vCluster Platform setup
+ # - Projects, Teams, Virtual Cluster Templates, Space Templates
+ # - Virtual Cluster Instances, Space Instances
+ # - RBAC: Cluster Access, Cluster Role Templates
+ # - Shared Secrets
+ # - Schema validation (valid and invalid resources)
+ # - Manifest assembly and YAML output
+ #
+ # Run: bundle exec ruby examples/vcluster.rb
+
+ require_relative "../lib/kube/schema"
+ require "fileutils" # explicit require for FileUtils.rm_rf cleanup at the end
+ require "tmpdir"
+
+ Manifest = Kube::Schema::Manifest
+
+ puts "=" * 60
+ puts "vCluster Platform (management.loft.sh) Schema Example"
+ puts "=" * 60
+ puts
+
+ # ── Verify loft types are loaded ────────────────────────────
+
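+ # Kube::Schema[] resolves either a Kubernetes version string (as here) or a
+ # resource kind name (e.g. Kube::Schema["Team"] below).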
+ schema = Kube::Schema["1.34"]
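+ # find_gvk_entry is a private lookup helper (hence the send); each entry
+ # carries the resource's API group.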
+ loft_kinds = schema.list_resources.select { |k|
+   entry = schema.send(:find_gvk_entry, k)
+   entry && entry[:group].include?("loft.sh")
+ }
+
+ puts "Available loft.sh resource kinds: #{loft_kinds.length}"
+ puts " #{loft_kinds.sort.first(10).join(", ")}..."
+ puts
+
+
+ # ══════════════════════════════════════════════════════════════
+ # 1. TEAMS — define who can access what
+ # ══════════════════════════════════════════════════════════════
+
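+ # Each .new block is evaluated against the resource being built, so bare
+ # metadata/spec calls below set that resource's fields.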
+ platform_admins = Kube::Schema["Team"].new {
+   metadata.name = "platform-admins"
+   spec.displayName = "Platform Administrators"
+   spec.description = "Full access to all platform resources"
+   spec.users = ["admin@company.com", "ops-lead@company.com"]
+   spec.groups = ["platform-admins"]
+   spec.clusterRoles = [
+     { clusterRole: "cluster-admin" }
+   ]
+   spec.access = [
+     {
+       verbs: ["get", "update", "delete"],
+       subresources: ["*"],
+       users: ["admin@company.com"]
+     }
+   ]
+ }
+
+ dev_team = Kube::Schema["Team"].new {
+   metadata.name = "backend-devs"
+   spec.displayName = "Backend Developers"
+   spec.description = "Backend engineering team"
+   spec.users = [
+     "alice@company.com",
+     "bob@company.com",
+     "carol@company.com"
+   ]
+   spec.groups = ["engineering", "backend"]
+ }
+
+ ml_team = Kube::Schema["Team"].new {
+   metadata.name = "ml-engineers"
+   spec.displayName = "ML Engineers"
+   spec.description = "Machine learning and data platform team"
+   spec.users = [
+     "dave@company.com",
+     "eve@company.com"
+   ]
+   spec.groups = ["engineering", "ml"]
+ }
+
+ puts "1. Teams created:"
+ puts " - #{platform_admins.metadata[:name]}: #{platform_admins.spec[:users]&.length || 0} users"
+ puts " - #{dev_team.metadata[:name]}: #{dev_team.spec[:users]&.length || 0} users"
+ puts " - #{ml_team.metadata[:name]}: #{ml_team.spec[:users]&.length || 0} users"
+ puts
+
+
+ # ══════════════════════════════════════════════════════════════
+ # 2. CLUSTER ROLE TEMPLATES — reusable RBAC policies
+ # ══════════════════════════════════════════════════════════════
+
+ namespace_admin_role = Kube::Schema["ClusterRoleTemplate"].new {
+   metadata.name = "namespace-admin"
+   spec.displayName = "Namespace Admin"
+   spec.description = "Full admin access within assigned namespaces"
+   spec.access = [
+     {
+       verbs: ["get"],
+       subresources: ["*"],
+       teams: ["platform-admins"]
+     }
+   ]
+   spec.clusterRoleTemplate = {
+     metadata: {
+       labels: { "loft.sh/managed" => "true" }
+     },
+     rules: [
+       {
+         apiGroups: ["", "apps", "batch", "networking.k8s.io"],
+         resources: ["*"],
+         verbs: ["*"]
+       },
+       {
+         apiGroups: ["rbac.authorization.k8s.io"],
+         resources: ["roles", "rolebindings"],
+         verbs: ["*"]
+       }
+     ]
+   }
+ }
+
+ readonly_role = Kube::Schema["ClusterRoleTemplate"].new {
+   metadata.name = "readonly-viewer"
+   spec.displayName = "Read-Only Viewer"
+   spec.description = "Read-only access for auditing and troubleshooting"
+   spec.clusterRoleTemplate = {
+     rules: [
+       {
+         apiGroups: ["*"],
+         resources: ["*"],
+         verbs: ["get", "list", "watch"]
+       }
+     ]
+   }
+ }
+
+ puts "2. ClusterRoleTemplates created:"
+ puts " - #{namespace_admin_role.metadata[:name]}"
+ puts " - #{readonly_role.metadata[:name]}"
+ puts
+
+
+ # ══════════════════════════════════════════════════════════════
+ # 3. CLUSTER ACCESS — bind teams to cluster roles
+ # ══════════════════════════════════════════════════════════════
+
+ dev_cluster_access = Kube::Schema["ClusterAccess"].new {
+   metadata.name = "backend-dev-access"
+   spec.displayName = "Backend Dev Cluster Access"
+   spec.description = "Grants backend devs namespace-admin on dev clusters"
+   spec.clusters = ["dev-cluster-us", "dev-cluster-eu"]
+   spec.access = [
+     {
+       verbs: ["get"],
+       subresources: ["*"],
+       teams: ["backend-devs"]
+     }
+   ]
+   spec.localClusterAccessTemplate = {
+     spec: {
+       teams: ["backend-devs"],
+       clusterRole: "namespace-admin"
+     }
+   }
+ }
+
+ puts "3. ClusterAccess created:"
+ puts " - #{dev_cluster_access.metadata[:name]}: #{dev_cluster_access.spec[:clusters]&.length || 0} clusters"
+ puts
+
+
+ # ══════════════════════════════════════════════════════════════
+ # 4. VIRTUAL CLUSTER TEMPLATE — reusable vcluster blueprints
+ # ══════════════════════════════════════════════════════════════
+
+ standard_vcluster_template = Kube::Schema["VirtualClusterTemplate"].new {
+   metadata.name = "standard-vcluster"
+   metadata.labels = {
+     "loft.sh/tier" => "standard",
+     "company.com/managed" => "true"
+   }
+   spec.displayName = "Standard Virtual Cluster"
+   spec.description = "Default vCluster template with k3s, resource limits, and auto-sleep"
+   spec.owner = {
+     team: "platform-admins"
+   }
+   spec.template = {
+     metadata: {
+       labels: {
+         "loft.sh/template" => "standard-vcluster"
+       },
+       annotations: {
+         "loft.sh/custom-links" => "https://docs.internal/vclusters"
+       }
+     },
+     accessPoint: {
+       ingress: {}
+     },
+     helmRelease: {
+       chart: {
+         version: "0.33.0"
+       },
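+       # Note: helmRelease values take a raw YAML string (heredoc), not a
+       # nested Ruby hash.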
+       values: <<~YAML
+         sync:
+           toHost:
+             ingresses:
+               enabled: true
+           fromHost:
+             nodes:
+               enabled: true
+         networking:
+           replicateServices:
+             fromHost:
+               - from: kube-system/kube-dns
+                 to: kube-system/kube-dns
+       YAML
+     },
+     spaceTemplate: {
+       metadata: {},
+       objects: <<~YAML
+         apiVersion: v1
+         kind: ResourceQuota
+         metadata:
+           name: vcluster-quota
+         spec:
+           hard:
+             requests.cpu: "4"
+             requests.memory: 8Gi
+             limits.cpu: "8"
+             limits.memory: 16Gi
+             pods: "50"
+       YAML
+     }
+   }
+   spec.access = [
+     {
+       verbs: ["get"],
+       subresources: ["*"],
+       teams: ["platform-admins", "backend-devs", "ml-engineers"]
+     }
+   ]
+ }
+
+ gpu_vcluster_template = Kube::Schema["VirtualClusterTemplate"].new {
+   metadata.name = "gpu-vcluster"
+   metadata.labels = {
+     "loft.sh/tier" => "gpu",
+     "company.com/managed" => "true"
+   }
+   spec.displayName = "GPU Virtual Cluster"
+   spec.description = "vCluster template for GPU workloads with NVIDIA device plugin sync"
+   spec.owner = { team: "platform-admins" }
+   spec.template = {
+     helmRelease: {
+       chart: { version: "0.33.0" },
+       values: <<~YAML
+         sync:
+           toHost:
+             ingresses:
+               enabled: true
+           fromHost:
+             nodes:
+               enabled: true
+               selector:
+                 labels:
+                   nvidia.com/gpu.present: "true"
+       YAML
+     },
+     spaceTemplate: {
+       objects: <<~YAML
+         apiVersion: v1
+         kind: ResourceQuota
+         metadata:
+           name: gpu-quota
+         spec:
+           hard:
+             requests.cpu: "16"
+             requests.memory: 64Gi
+             nvidia.com/gpu: "4"
+             pods: "20"
+       YAML
+     }
+   }
+ }
+
+ puts "4. VirtualClusterTemplates created:"
+ puts " - #{standard_vcluster_template.metadata[:name]}"
+ puts " - #{gpu_vcluster_template.metadata[:name]}"
+ puts
+
+
+ # ══════════════════════════════════════════════════════════════
+ # 5. SPACE TEMPLATE — namespace blueprints
+ # ══════════════════════════════════════════════════════════════
+
+ isolated_space_template = Kube::Schema["SpaceTemplate"].new {
+   metadata.name = "isolated-namespace"
+   spec.displayName = "Isolated Namespace"
+   spec.description = "Namespace with network policies that deny all ingress/egress by default"
+   spec.owner = { team: "platform-admins" }
+   spec.template = {
+     metadata: {
+       labels: {
+         "loft.sh/isolation" => "strict"
+       }
+     },
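+     # objects holds raw multi-document YAML (note the --- separator below)
+     # rather than structured hashes.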
+     objects: <<~YAML
+       apiVersion: networking.k8s.io/v1
+       kind: NetworkPolicy
+       metadata:
+         name: default-deny-all
+       spec:
+         podSelector: {}
+         policyTypes:
+           - Ingress
+           - Egress
+       ---
+       apiVersion: v1
+       kind: LimitRange
+       metadata:
+         name: default-limits
+       spec:
+         limits:
+           - default:
+               cpu: 500m
+               memory: 512Mi
+             defaultRequest:
+               cpu: 100m
+               memory: 128Mi
+             type: Container
+     YAML
+   }
+   spec.access = [
+     {
+       verbs: ["get"],
+       subresources: ["*"],
+       teams: ["platform-admins", "backend-devs"]
+     }
+   ]
+ }
+
+ puts "5. SpaceTemplate created:"
+ puts " - #{isolated_space_template.metadata[:name]}"
+ puts
+
+
+ # ══════════════════════════════════════════════════════════════
+ # 6. PROJECT — multi-tenant isolation boundary
+ # ══════════════════════════════════════════════════════════════
+
+ backend_project = Kube::Schema["Project"].new {
+   metadata.name = "backend"
+   spec.displayName = "Backend Services"
+   spec.description = "Project for backend microservices team"
+   spec.owner = {
+     team: "platform-admins"
+   }
+   spec.members = [
+     {
+       team: "backend-devs",
+       clusterRole: "loft-management-project-admin"
+     },
+     {
+       team: "platform-admins",
+       clusterRole: "loft-management-project-admin"
+     }
+   ]
+   spec.allowedClusters = [
+     { name: "dev-cluster-us" },
+     { name: "dev-cluster-eu" },
+     { name: "staging-cluster" }
+   ]
+   spec.allowedTemplates = [
+     { kind: "VirtualClusterTemplate", name: "standard-vcluster" },
+     { kind: "SpaceTemplate", name: "isolated-namespace" }
+   ]
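+   # Quoted-symbol keys ("requests.cpu":) keep dotted and slashed quota names
+   # as symbols.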
+   spec.quotas = {
+     project: {
+       "requests.cpu": "32",
+       "requests.memory": "64Gi",
+       "pods": "200",
+       "count/virtualclusterinstances": "5",
+       "count/spaceinstances": "10"
+     }
+   }
+   spec.requireTemplate = {
+     enforced: false
+   }
+ }
+
+ ml_project = Kube::Schema["Project"].new {
+   metadata.name = "ml-platform"
+   spec.displayName = "ML Platform"
+   spec.description = "Project for ML training and inference workloads"
+   spec.owner = { team: "platform-admins" }
+   spec.members = [
+     {
+       team: "ml-engineers",
+       clusterRole: "loft-management-project-admin"
+     }
+   ]
+   spec.allowedClusters = [
+     { name: "gpu-cluster-us" }
+   ]
+   spec.allowedTemplates = [
+     { kind: "VirtualClusterTemplate", name: "gpu-vcluster" },
+     { kind: "VirtualClusterTemplate", name: "standard-vcluster" }
+   ]
+   spec.quotas = {
+     project: {
+       "nvidia.com/gpu": "8",
+       "requests.cpu": "64",
+       "requests.memory": "256Gi"
+     }
+   }
+ }
+
+ puts "6. Projects created:"
+ puts " - #{backend_project.metadata[:name]}: #{backend_project.spec[:allowedClusters]&.length || 0} clusters, #{backend_project.spec[:members]&.length || 0} members"
+ puts " - #{ml_project.metadata[:name]}: #{ml_project.spec[:allowedClusters]&.length || 0} clusters, #{ml_project.spec[:members]&.length || 0} members"
+ puts
+
+
+ # ══════════════════════════════════════════════════════════════
+ # 7. VIRTUAL CLUSTER INSTANCES — actual vclusters
+ # ══════════════════════════════════════════════════════════════
+
+ dev_vcluster = Kube::Schema["VirtualClusterInstance"].new {
+   metadata.name = "backend-dev"
+   metadata.namespace = "loft-p-backend"
+   metadata.labels = {
+     "app.kubernetes.io/managed-by" => "loft",
+     "env" => "development"
+   }
+   metadata.annotations = {
+     "loft.sh/custom-links" => "https://grafana.internal/d/vcluster?var-name=backend-dev"
+   }
+   spec.displayName = "Backend Dev Cluster"
+   spec.description = "Development vCluster for the backend team"
+   spec.owner = { team: "backend-devs" }
+   spec.templateRef = {
+     name: "standard-vcluster"
+   }
+   spec.clusterRef = {
+     cluster: "dev-cluster-us",
+     namespace: "loft-backend-dev"
+   }
+   spec.access = [
+     {
+       verbs: ["get", "update"],
+       subresources: ["*"],
+       teams: ["backend-devs"]
+     }
+   ]
+ }
+
+ staging_vcluster = Kube::Schema["VirtualClusterInstance"].new {
+   metadata.name = "backend-staging"
+   metadata.namespace = "loft-p-backend"
+   metadata.labels = {
+     "app.kubernetes.io/managed-by" => "loft",
+     "env" => "staging"
+   }
+   spec.displayName = "Backend Staging Cluster"
+   spec.description = "Staging vCluster that mirrors production topology"
+   spec.owner = { team: "platform-admins" }
+   spec.templateRef = {
+     name: "standard-vcluster"
+   }
+   spec.clusterRef = {
+     cluster: "staging-cluster",
+     namespace: "loft-backend-staging"
+   }
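+   # parameters is a YAML string of template parameter values; sleepAfter and
+   # deleteAfter appear to be seconds (2 hours and 7 days).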
+   spec.parameters = "sleepAfter: 7200\ndeleteAfter: 604800"
+ }
+
+ ml_training_vcluster = Kube::Schema["VirtualClusterInstance"].new {
+   metadata.name = "ml-training"
+   metadata.namespace = "loft-p-ml-platform"
+   metadata.labels = {
+     "env" => "training",
+     "workload-type" => "gpu"
+   }
+   spec.displayName = "ML Training Cluster"
+   spec.description = "GPU-enabled vCluster for model training"
+   spec.owner = { team: "ml-engineers" }
+   spec.templateRef = {
+     name: "gpu-vcluster"
+   }
+   spec.clusterRef = {
+     cluster: "gpu-cluster-us"
+   }
+ }
+
+ puts "7. VirtualClusterInstances created:"
+ puts " - #{dev_vcluster.metadata[:name]} (#{dev_vcluster.spec[:templateRef][:name]})"
+ puts " - #{staging_vcluster.metadata[:name]} (#{staging_vcluster.spec[:templateRef][:name]})"
+ puts " - #{ml_training_vcluster.metadata[:name]} (#{ml_training_vcluster.spec[:templateRef][:name]})"
+ puts
+
+
+ # ══════════════════════════════════════════════════════════════
+ # 8. SPACE INSTANCES — managed namespaces
+ # ══════════════════════════════════════════════════════════════
+
+ ci_space = Kube::Schema["SpaceInstance"].new {
+   metadata.name = "ci-runners"
+   metadata.namespace = "loft-p-backend"
+   spec.displayName = "CI Runners"
+   spec.description = "Isolated namespace for CI/CD pipeline runners"
+   spec.owner = { team: "platform-admins" }
+   spec.templateRef = {
+     name: "isolated-namespace"
+   }
+   spec.clusterRef = {
+     cluster: "dev-cluster-us"
+   }
+ }
+
+ puts "8. SpaceInstance created:"
+ puts " - #{ci_space.metadata[:name]}"
+ puts
+
+
+ # ══════════════════════════════════════════════════════════════
+ # 9. SHARED SECRETS — cross-project secrets
+ # ══════════════════════════════════════════════════════════════
+
+ docker_creds = Kube::Schema["SharedSecret"].new {
+   metadata.name = "registry-credentials"
+   metadata.namespace = "loft-p-backend"
+   spec.displayName = "Container Registry Credentials"
+   spec.description = "Shared credentials for pulling from the private container registry"
+   spec.access = [
+     {
+       verbs: ["get"],
+       subresources: ["*"],
+       teams: ["backend-devs", "ml-engineers"]
+     }
+   ]
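+   # As with core Secrets, data values are base64-encoded; this one decodes to
+   # {"auths":{"registry.company.com":{}}}.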
+   spec.data = {
+     ".dockerconfigjson" => "eyJhdXRocyI6eyJyZWdpc3RyeS5jb21wYW55LmNvbSI6e319fQ=="
+   }
+ }
+
+ puts "9. SharedSecret created:"
+ puts " - #{docker_creds.metadata[:name]}"
+ puts
+
+
+ # ══════════════════════════════════════════════════════════════
+ # 10. VALIDATION — prove schemas actually validate
+ # ══════════════════════════════════════════════════════════════
+
+ puts "10. Validation checks:"
+
+ # All resources should be valid
+ all_resources = [
+   platform_admins, dev_team, ml_team,
+   namespace_admin_role, readonly_role,
+   dev_cluster_access,
+   standard_vcluster_template, gpu_vcluster_template,
+   isolated_space_template,
+   backend_project, ml_project,
+   dev_vcluster, staging_vcluster, ml_training_vcluster,
+   ci_space,
+   docker_creds
+ ]
+
+ all_resources.each do |r|
+   kind = r.to_h[:kind]
+   name = r.metadata[:name]
+   puts " #{kind}/#{name}: valid? = #{r.valid?}"
+ end
+
+ # Deliberately invalid resource: wrong types
+ puts
+ puts " Checking invalid resource detection..."
+ bad_vci = Kube::Schema["VirtualClusterInstance"].new {
+   metadata.name = 99999 # should be string
+   spec.description = ["not", "a", "string"] # should be string
+ }
+ puts " Invalid VCI valid? = #{bad_vci.valid?}"
+
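+ # valid? returns a boolean, while valid! raises Kube::ValidationError with the
+ # underlying schema errors attached.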
+ begin
+   bad_vci.valid!
+ rescue Kube::ValidationError => e
+   puts " Caught ValidationError: #{e.errors.length} error(s)"
+   e.errors.each do |err|
+     puts " - #{err["data_pointer"]}: #{err["type"]}"
+   end
+ end
+ puts
+
+
+ # ══════════════════════════════════════════════════════════════
+ # 11. MANIFEST — assemble and output YAML
+ # ══════════════════════════════════════════════════════════════
+
+ puts "11. Assembling full platform manifest..."
+
+ manifest = Manifest.new
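+ # Resources are appended with << and later serialized to a single YAML file,
+ # then read back with Manifest.open.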
+
+ # RBAC layer
+ manifest << platform_admins
+ manifest << dev_team
+ manifest << ml_team
+ manifest << namespace_admin_role
+ manifest << readonly_role
+ manifest << dev_cluster_access
+
+ # Templates
+ manifest << standard_vcluster_template
+ manifest << gpu_vcluster_template
+ manifest << isolated_space_template
+
+ # Projects
+ manifest << backend_project
+ manifest << ml_project
+
+ # Instances
+ manifest << dev_vcluster
+ manifest << staging_vcluster
+ manifest << ml_training_vcluster
+ manifest << ci_space
+
+ # Secrets
+ manifest << docker_creds
+
+ puts " #{manifest.size} resources in manifest"
+ puts
+
+ # Write to temp file and read back
+ tmpdir = Dir.mktmpdir("vcluster_example")
+ path = File.join(tmpdir, "platform.yaml")
+ manifest.write(path)
+ puts " Written to: #{path}"
+ puts " File size: #{File.size(path)} bytes"
+
+ # Round-trip: read it back
+ loaded = Manifest.open(path)
+ puts " Round-trip loaded: #{loaded.size} resources"
+
+ # Print the first few resources as YAML
+ puts "=" * 60
+ puts "YAML Output (first 3 resources):"
+ puts "=" * 60
+ manifest.first(3).each { |r| puts r.to_yaml }
+
+ # Cleanup
+ FileUtils.rm_rf(tmpdir)
+
+ puts
+ puts "Done."