@xdev-asia/xdev-knowledge-mcp 1.0.44 → 1.0.46
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/content/series/luyen-thi/luyen-thi-cka/chapters/01-cluster-architecture/lessons/01-kien-truc-cka-kubeadm.md +133 -0
- package/content/series/luyen-thi/luyen-thi-cka/chapters/01-cluster-architecture/lessons/02-cluster-upgrade-kubeadm.md +147 -0
- package/content/series/luyen-thi/luyen-thi-cka/chapters/01-cluster-architecture/lessons/03-rbac-cka.md +152 -0
- package/content/series/luyen-thi/luyen-thi-cka/chapters/02-workloads-scheduling/lessons/04-deployments-daemonsets-statefulsets.md +186 -0
- package/content/series/luyen-thi/luyen-thi-cka/chapters/02-workloads-scheduling/lessons/05-scheduling-taints-affinity.md +163 -0
- package/content/series/luyen-thi/luyen-thi-cka/chapters/03-services-networking/lessons/06-services-endpoints-coredns.md +145 -0
- package/content/series/luyen-thi/luyen-thi-cka/chapters/03-services-networking/lessons/07-ingress-networkpolicies-cni.md +172 -0
- package/content/series/luyen-thi/luyen-thi-cka/chapters/04-storage/lessons/08-persistent-volumes-storageclass.md +159 -0
- package/content/series/luyen-thi/luyen-thi-cka/chapters/05-troubleshooting/lessons/09-etcd-backup-restore.md +149 -0
- package/content/series/luyen-thi/luyen-thi-cka/chapters/05-troubleshooting/lessons/10-troubleshooting-nodes.md +153 -0
- package/content/series/luyen-thi/luyen-thi-cka/chapters/05-troubleshooting/lessons/11-troubleshooting-workloads.md +146 -0
- package/content/series/luyen-thi/luyen-thi-cka/chapters/05-troubleshooting/lessons/12-troubleshooting-networking-exam.md +170 -0
- package/content/series/luyen-thi/luyen-thi-cka/index.md +7 -7
- package/content/series/luyen-thi/luyen-thi-ckad/chapters/01-app-design-build/lessons/01-multi-container-pods.md +146 -0
- package/content/series/luyen-thi/luyen-thi-ckad/chapters/01-app-design-build/lessons/02-jobs-cronjobs-resources.md +174 -0
- package/content/series/luyen-thi/luyen-thi-ckad/chapters/02-app-deployment/lessons/03-rolling-updates-rollbacks.md +148 -0
- package/content/series/luyen-thi/luyen-thi-ckad/chapters/02-app-deployment/lessons/04-helm-kustomize.md +181 -0
- package/content/series/luyen-thi/luyen-thi-ckad/chapters/03-app-observability/lessons/05-probes-logging-debugging.md +183 -0
- package/content/series/luyen-thi/luyen-thi-ckad/chapters/04-app-environment-config/lessons/06-configmaps-secrets.md +182 -0
- package/content/series/luyen-thi/luyen-thi-ckad/chapters/04-app-environment-config/lessons/07-securitycontext-pod-security.md +168 -0
- package/content/series/luyen-thi/luyen-thi-ckad/chapters/04-app-environment-config/lessons/08-resources-qos.md +168 -0
- package/content/series/luyen-thi/luyen-thi-ckad/chapters/05-services-networking/lessons/09-services-ingress.md +182 -0
- package/content/series/luyen-thi/luyen-thi-ckad/chapters/05-services-networking/lessons/10-networkpolicies-exam-strategy.md +236 -0
- package/content/series/luyen-thi/luyen-thi-ckad/index.md +7 -7
- package/content/series/luyen-thi/luyen-thi-kcna/chapters/01-kubernetes-fundamentals/lessons/01-kien-truc-kubernetes.md +137 -0
- package/content/series/luyen-thi/luyen-thi-kcna/chapters/01-kubernetes-fundamentals/lessons/02-pods-workloads-controllers.md +142 -0
- package/content/series/luyen-thi/luyen-thi-kcna/chapters/01-kubernetes-fundamentals/lessons/03-services-networking-storage.md +155 -0
- package/content/series/luyen-thi/luyen-thi-kcna/chapters/01-kubernetes-fundamentals/lessons/04-rbac-security.md +137 -0
- package/content/series/luyen-thi/luyen-thi-kcna/chapters/02-container-orchestration/lessons/05-container-runtimes-oci.md +137 -0
- package/content/series/luyen-thi/luyen-thi-kcna/chapters/02-container-orchestration/lessons/06-orchestration-patterns.md +147 -0
- package/content/series/luyen-thi/luyen-thi-kcna/chapters/03-cloud-native-architecture/lessons/07-cloud-native-architecture.md +143 -0
- package/content/series/luyen-thi/luyen-thi-kcna/chapters/04-observability-delivery/lessons/08-observability.md +143 -0
- package/content/series/luyen-thi/luyen-thi-kcna/chapters/04-observability-delivery/lessons/09-helm-gitops-cicd.md +162 -0
- package/content/series/luyen-thi/luyen-thi-kcna/index.md +1 -1
- package/data/quizzes.json +1059 -0
- package/package.json +1 -1
package/data/quizzes.json
CHANGED
|
@@ -760,5 +760,1064 @@
|
|
|
760
760
|
"explanation": "Continuous Training tự động kích hoạt retrain pipeline khi: data mới đến (scheduled), data drift vượt threshold, hoặc model performance giảm — đảm bảo model luôn fresh."
|
|
761
761
|
}
|
|
762
762
|
]
|
|
763
|
+
},
|
|
764
|
+
{
|
|
765
|
+
"id": "kcna",
|
|
766
|
+
"title": "KCNA — Kubernetes and Cloud Native Associate",
|
|
767
|
+
"slug": "kcna",
|
|
768
|
+
"description": "Practice exam for KCNA — 20 multiple-choice questions covering all 5 domains",
|
|
769
|
+
"icon": "award",
|
|
770
|
+
"provider": "CNCF",
|
|
771
|
+
"level": "Associate",
|
|
772
|
+
"duration_minutes": 30,
|
|
773
|
+
"passing_score": 75,
|
|
774
|
+
"questions_count": 20,
|
|
775
|
+
"tags": [
|
|
776
|
+
"Kubernetes",
|
|
777
|
+
"CNCF",
|
|
778
|
+
"Cloud Native",
|
|
779
|
+
"DevOps"
|
|
780
|
+
],
|
|
781
|
+
"series_slug": "luyen-thi-kcna",
|
|
782
|
+
"domains": [
|
|
783
|
+
{
|
|
784
|
+
"name": "Domain 1: Kubernetes Fundamentals",
|
|
785
|
+
"weight": 46,
|
|
786
|
+
"lessons": [
|
|
787
|
+
{
|
|
788
|
+
"title": "Bài 1: Kubernetes Architecture & Core Components",
|
|
789
|
+
"slug": "01-kien-truc-kubernetes"
|
|
790
|
+
},
|
|
791
|
+
{
|
|
792
|
+
"title": "Bài 2: Pods, Workloads & Controllers",
|
|
793
|
+
"slug": "02-pods-workloads-controllers"
|
|
794
|
+
},
|
|
795
|
+
{
|
|
796
|
+
"title": "Bài 3: Services, Networking & Storage",
|
|
797
|
+
"slug": "03-services-networking-storage"
|
|
798
|
+
},
|
|
799
|
+
{
|
|
800
|
+
"title": "Bài 4: RBAC & Security Basics",
|
|
801
|
+
"slug": "04-rbac-security"
|
|
802
|
+
}
|
|
803
|
+
]
|
|
804
|
+
},
|
|
805
|
+
{
|
|
806
|
+
"name": "Domain 2: Container Orchestration",
|
|
807
|
+
"weight": 22,
|
|
808
|
+
"lessons": [
|
|
809
|
+
{
|
|
810
|
+
"title": "Bài 5: Container Runtimes & OCI Standards",
|
|
811
|
+
"slug": "05-container-runtimes-oci"
|
|
812
|
+
},
|
|
813
|
+
{
|
|
814
|
+
"title": "Bài 6: Container Orchestration Patterns",
|
|
815
|
+
"slug": "06-orchestration-patterns"
|
|
816
|
+
}
|
|
817
|
+
]
|
|
818
|
+
},
|
|
819
|
+
{
|
|
820
|
+
"name": "Domain 3: Cloud Native Architecture",
|
|
821
|
+
"weight": 16,
|
|
822
|
+
"lessons": [
|
|
823
|
+
{
|
|
824
|
+
"title": "Bài 7: Cloud Native Architecture & Design Patterns",
|
|
825
|
+
"slug": "07-cloud-native-architecture"
|
|
826
|
+
}
|
|
827
|
+
]
|
|
828
|
+
},
|
|
829
|
+
{
|
|
830
|
+
"name": "Domain 4 & 5: Observability & Application Delivery",
|
|
831
|
+
"weight": 16,
|
|
832
|
+
"lessons": [
|
|
833
|
+
{
|
|
834
|
+
"title": "Bài 8: Cloud Native Observability",
|
|
835
|
+
"slug": "08-observability"
|
|
836
|
+
},
|
|
837
|
+
{
|
|
838
|
+
"title": "Bài 9: Application Delivery — Helm, GitOps & CI/CD",
|
|
839
|
+
"slug": "09-helm-gitops-cicd"
|
|
840
|
+
}
|
|
841
|
+
]
|
|
842
|
+
}
|
|
843
|
+
],
|
|
844
|
+
"questions": [
|
|
845
|
+
{
|
|
846
|
+
"id": 1,
|
|
847
|
+
"domain": "Kubernetes Fundamentals",
|
|
848
|
+
"question": "Which Kubernetes component is responsible for watching for newly created Pods with no assigned node and selecting a node for them to run on?",
|
|
849
|
+
"options": [
|
|
850
|
+
"kube-apiserver",
|
|
851
|
+
"kube-scheduler",
|
|
852
|
+
"kube-controller-manager",
|
|
853
|
+
"kubelet"
|
|
854
|
+
],
|
|
855
|
+
"correct": 1,
|
|
856
|
+
"explanation": "kube-scheduler watches for newly created Pods and selects the best node for them based on resource availability, constraints, and policies."
|
|
857
|
+
},
|
|
858
|
+
{
|
|
859
|
+
"id": 2,
|
|
860
|
+
"domain": "Kubernetes Fundamentals",
|
|
861
|
+
"question": "Which component stores all cluster state in Kubernetes?",
|
|
862
|
+
"options": [
|
|
863
|
+
"kube-apiserver",
|
|
864
|
+
"kube-controller-manager",
|
|
865
|
+
"etcd",
|
|
866
|
+
"kubelet"
|
|
867
|
+
],
|
|
868
|
+
"correct": 2,
|
|
869
|
+
"explanation": "etcd is the consistent and highly-available key-value store used as Kubernetes' backing store for all cluster state."
|
|
870
|
+
},
|
|
871
|
+
{
|
|
872
|
+
"id": 3,
|
|
873
|
+
"domain": "Kubernetes Fundamentals",
|
|
874
|
+
"question": "A Pod is in 'Pending' state. What is the MOST likely reason?",
|
|
875
|
+
"options": [
|
|
876
|
+
"The container application crashed",
|
|
877
|
+
"No node satisfies the scheduling requirements",
|
|
878
|
+
"The image pull failed",
|
|
879
|
+
"A liveness probe failed"
|
|
880
|
+
],
|
|
881
|
+
"correct": 1,
|
|
882
|
+
"explanation": "Pending means the Pod has been accepted but one or more containers have not been started — most commonly because no node has sufficient resources or the Pod doesn't match any node's constraints."
|
|
883
|
+
},
|
|
884
|
+
{
|
|
885
|
+
"id": 4,
|
|
886
|
+
"domain": "Kubernetes Fundamentals",
|
|
887
|
+
"question": "Which Kubernetes resource ensures exactly one Pod runs on every node in the cluster?",
|
|
888
|
+
"options": [
|
|
889
|
+
"Deployment",
|
|
890
|
+
"ReplicaSet",
|
|
891
|
+
"StatefulSet",
|
|
892
|
+
"DaemonSet"
|
|
893
|
+
],
|
|
894
|
+
"correct": 3,
|
|
895
|
+
"explanation": "A DaemonSet ensures that all (or some) Nodes run a copy of a Pod — commonly used for log collectors, monitoring agents, and network plugins."
|
|
896
|
+
},
|
|
897
|
+
{
|
|
898
|
+
"id": 5,
|
|
899
|
+
"domain": "Kubernetes Fundamentals",
|
|
900
|
+
"question": "What is the default Service type in Kubernetes?",
|
|
901
|
+
"options": [
|
|
902
|
+
"NodePort",
|
|
903
|
+
"LoadBalancer",
|
|
904
|
+
"ClusterIP",
|
|
905
|
+
"ExternalName"
|
|
906
|
+
],
|
|
907
|
+
"correct": 2,
|
|
908
|
+
"explanation": "ClusterIP is the default Service type. It exposes the Service on a cluster-internal IP, making it only accessible from within the cluster."
|
|
909
|
+
},
|
|
910
|
+
{
|
|
911
|
+
"id": 6,
|
|
912
|
+
"domain": "Kubernetes Fundamentals",
|
|
913
|
+
"question": "Which access mode allows a PersistentVolume to be mounted as read-write by many nodes simultaneously?",
|
|
914
|
+
"options": [
|
|
915
|
+
"ReadWriteOnce (RWO)",
|
|
916
|
+
"ReadOnlyMany (ROX)",
|
|
917
|
+
"ReadWriteMany (RWX)",
|
|
918
|
+
"ReadWriteOncePod (RWOP)"
|
|
919
|
+
],
|
|
920
|
+
"correct": 2,
|
|
921
|
+
"explanation": "ReadWriteMany (RWX) allows the volume to be mounted as read-write by many nodes simultaneously, required for shared storage scenarios."
|
|
922
|
+
},
|
|
923
|
+
{
|
|
924
|
+
"id": 7,
|
|
925
|
+
"domain": "Kubernetes Fundamentals",
|
|
926
|
+
"question": "Which RBAC resource grants permissions within a specific namespace?",
|
|
927
|
+
"options": [
|
|
928
|
+
"ClusterRole",
|
|
929
|
+
"ClusterRoleBinding",
|
|
930
|
+
"Role",
|
|
931
|
+
"ServiceAccount"
|
|
932
|
+
],
|
|
933
|
+
"correct": 2,
|
|
934
|
+
"explanation": "Role is namespace-scoped. ClusterRole is cluster-wide. A Role combined with a RoleBinding grants permissions within a specific namespace."
|
|
935
|
+
},
|
|
936
|
+
{
|
|
937
|
+
"id": 8,
|
|
938
|
+
"domain": "Kubernetes Fundamentals",
|
|
939
|
+
"question": "In a StatefulSet, what is the naming pattern for Pods?",
|
|
940
|
+
"options": [
|
|
941
|
+
"Random hash suffix (e.g., web-abc12)",
|
|
942
|
+
"Incrementing ordinal (e.g., web-0, web-1)",
|
|
943
|
+
"Timestamp suffix",
|
|
944
|
+
"UUID suffix"
|
|
945
|
+
],
|
|
946
|
+
"correct": 1,
|
|
947
|
+
"explanation": "StatefulSet Pods have a stable, unique ordinal identity (web-0, web-1, web-2). This predictable naming enables ordered deployment, scaling, and rolling updates."
|
|
948
|
+
},
|
|
949
|
+
{
|
|
950
|
+
"id": 9,
|
|
951
|
+
"domain": "Container Orchestration",
|
|
952
|
+
"question": "Which is the default container runtime interface (CRI) used by Kubernetes since v1.20?",
|
|
953
|
+
"options": [
|
|
954
|
+
"Docker",
|
|
955
|
+
"containerd",
|
|
956
|
+
"CRI-O",
|
|
957
|
+
"rkt"
|
|
958
|
+
],
|
|
959
|
+
"correct": 1,
|
|
960
|
+
"explanation": "containerd is the default CRI since Kubernetes deprecated Dockershim in v1.20. containerd is lightweight and directly implements the CRI specification."
|
|
961
|
+
},
|
|
962
|
+
{
|
|
963
|
+
"id": 10,
|
|
964
|
+
"domain": "Container Orchestration",
|
|
965
|
+
"question": "What does OCI stand for in the context of containers?",
|
|
966
|
+
"options": [
|
|
967
|
+
"Open Container Interface",
|
|
968
|
+
"Open Container Initiative",
|
|
969
|
+
"Oracle Container Infrastructure",
|
|
970
|
+
"Orchestration Container Interface"
|
|
971
|
+
],
|
|
972
|
+
"correct": 1,
|
|
973
|
+
"explanation": "OCI stands for Open Container Initiative — an open governance structure for creating open industry standards around container formats and runtimes."
|
|
974
|
+
},
|
|
975
|
+
{
|
|
976
|
+
"id": 11,
|
|
977
|
+
"domain": "Container Orchestration",
|
|
978
|
+
"question": "Which Kubernetes feature automatically scales the number of Pod replicas based on CPU utilization?",
|
|
979
|
+
"options": [
|
|
980
|
+
"Vertical Pod Autoscaler",
|
|
981
|
+
"Horizontal Pod Autoscaler",
|
|
982
|
+
"Cluster Autoscaler",
|
|
983
|
+
"KEDA"
|
|
984
|
+
],
|
|
985
|
+
"correct": 1,
|
|
986
|
+
"explanation": "Horizontal Pod Autoscaler (HPA) automatically scales the number of Pod replicas based on observed CPU utilization or custom metrics."
|
|
987
|
+
},
|
|
988
|
+
{
|
|
989
|
+
"id": 12,
|
|
990
|
+
"domain": "Cloud Native Architecture",
|
|
991
|
+
"question": "According to the 12-factor app methodology, how should application configuration be stored?",
|
|
992
|
+
"options": [
|
|
993
|
+
"In the container image",
|
|
994
|
+
"In config files committed to git",
|
|
995
|
+
"In environment variables",
|
|
996
|
+
"In a database"
|
|
997
|
+
],
|
|
998
|
+
"correct": 2,
|
|
999
|
+
"explanation": "Factor III of 12-factor app: Store config in the environment. Configuration that varies between deployments should be stored in environment variables, not in code."
|
|
1000
|
+
},
|
|
1001
|
+
{
|
|
1002
|
+
"id": 13,
|
|
1003
|
+
"domain": "Cloud Native Architecture",
|
|
1004
|
+
"question": "What is the primary purpose of a Service Mesh in a microservices architecture?",
|
|
1005
|
+
"options": [
|
|
1006
|
+
"Database connection pooling",
|
|
1007
|
+
"Managing service-to-service communication, observability, and security",
|
|
1008
|
+
"Container image distribution",
|
|
1009
|
+
"DNS resolution for services"
|
|
1010
|
+
],
|
|
1011
|
+
"correct": 1,
|
|
1012
|
+
"explanation": "A service mesh (like Istio or Linkerd) handles service-to-service communication with features like load balancing, mTLS encryption, circuit breaking, observability, and traffic management."
|
|
1013
|
+
},
|
|
1014
|
+
{
|
|
1015
|
+
"id": 14,
|
|
1016
|
+
"domain": "Cloud Native Architecture",
|
|
1017
|
+
"question": "Which CNCF project is the primary service mesh used in production Kubernetes environments?",
|
|
1018
|
+
"options": [
|
|
1019
|
+
"Helm",
|
|
1020
|
+
"Istio",
|
|
1021
|
+
"Argo CD",
|
|
1022
|
+
"Prometheus"
|
|
1023
|
+
],
|
|
1024
|
+
"correct": 1,
|
|
1025
|
+
"explanation": "Istio is the most widely adopted service mesh. It provides traffic management, security (mTLS), and observability via an Envoy sidecar proxy injected into each Pod."
|
|
1026
|
+
},
|
|
1027
|
+
{
|
|
1028
|
+
"id": 15,
|
|
1029
|
+
"domain": "Cloud Native Architecture",
|
|
1030
|
+
"question": "What differentiates serverless computing from traditional container deployments?",
|
|
1031
|
+
"options": [
|
|
1032
|
+
"Serverless uses more servers",
|
|
1033
|
+
"Serverless abstracts server management; scales to zero when idle",
|
|
1034
|
+
"Serverless requires container orchestration",
|
|
1035
|
+
"Serverless is only for batch workloads"
|
|
1036
|
+
],
|
|
1037
|
+
"correct": 1,
|
|
1038
|
+
"explanation": "Serverless abstracts infrastructure management — the platform handles scaling, including scaling to zero when there's no traffic, so you pay only for actual execution time."
|
|
1039
|
+
},
|
|
1040
|
+
{
|
|
1041
|
+
"id": 16,
|
|
1042
|
+
"domain": "Cloud Native Observability",
|
|
1043
|
+
"question": "Which is the standard CNCF observability data format that unifies metrics, logs, and traces?",
|
|
1044
|
+
"options": [
|
|
1045
|
+
"Prometheus Remote Write",
|
|
1046
|
+
"OpenTelemetry",
|
|
1047
|
+
"Fluentd",
|
|
1048
|
+
"Grafana Loki"
|
|
1049
|
+
],
|
|
1050
|
+
"correct": 1,
|
|
1051
|
+
"explanation": "OpenTelemetry (OTel) is the CNCF standard for observability — providing a single set of APIs, SDKs, and tools to collect, process, and export metrics, logs, and traces."
|
|
1052
|
+
},
|
|
1053
|
+
{
|
|
1054
|
+
"id": 17,
|
|
1055
|
+
"domain": "Cloud Native Observability",
|
|
1056
|
+
"question": "In the SLO/SLI/SLA framework, what does SLI stand for?",
|
|
1057
|
+
"options": [
|
|
1058
|
+
"Service Level Infrastructure",
|
|
1059
|
+
"Service Level Indicator",
|
|
1060
|
+
"System Load Index",
|
|
1061
|
+
"Service Latency Index"
|
|
1062
|
+
],
|
|
1063
|
+
"correct": 1,
|
|
1064
|
+
"explanation": "SLI (Service Level Indicator) is a quantitative measure of service behavior. Examples: request latency, error rate, throughput. SLOs are targets based on SLIs."
|
|
1065
|
+
},
|
|
1066
|
+
{
|
|
1067
|
+
"id": 18,
|
|
1068
|
+
"domain": "Application Delivery",
|
|
1069
|
+
"question": "What is GitOps?",
|
|
1070
|
+
"options": [
|
|
1071
|
+
"A git branching strategy",
|
|
1072
|
+
"A practice where Git is the single source of truth for declarative infrastructure and code",
|
|
1073
|
+
"A CI/CD tool",
|
|
1074
|
+
"A container registry"
|
|
1075
|
+
],
|
|
1076
|
+
"correct": 1,
|
|
1077
|
+
"explanation": "GitOps uses Git repositories as the single source of truth for declarative infrastructure. Automated agents (like Argo CD or Flux) reconcile the actual state with the desired state in Git."
|
|
1078
|
+
},
|
|
1079
|
+
{
|
|
1080
|
+
"id": 19,
|
|
1081
|
+
"domain": "Application Delivery",
|
|
1082
|
+
"question": "Which command installs a Helm chart with the release name 'myapp' from a chart directory?",
|
|
1083
|
+
"options": [
|
|
1084
|
+
"helm deploy myapp ./chart",
|
|
1085
|
+
"helm install myapp ./chart",
|
|
1086
|
+
"helm apply myapp ./chart",
|
|
1087
|
+
"helm start myapp ./chart"
|
|
1088
|
+
],
|
|
1089
|
+
"correct": 1,
|
|
1090
|
+
"explanation": "helm install <release-name> <chart-path> is the correct command. You can also use --values to override defaults and --namespace to specify the target namespace."
|
|
1091
|
+
},
|
|
1092
|
+
{
|
|
1093
|
+
"id": 20,
|
|
1094
|
+
"domain": "Application Delivery",
|
|
1095
|
+
"question": "In a canary deployment strategy, what is the purpose of the 'canary' version?",
|
|
1096
|
+
"options": [
|
|
1097
|
+
"To replace the stable version immediately",
|
|
1098
|
+
"To test the new version with a small percentage of traffic before full rollout",
|
|
1099
|
+
"To run only in staging environment",
|
|
1100
|
+
"To handle high-traffic scenarios"
|
|
1101
|
+
],
|
|
1102
|
+
"correct": 1,
|
|
1103
|
+
"explanation": "Canary deployment sends a small percentage of traffic (e.g., 5-10%) to the new version. If metrics are healthy, traffic is gradually increased. This limits blast radius of new version issues."
|
|
1104
|
+
}
|
|
1105
|
+
]
|
|
1106
|
+
},
|
|
1107
|
+
{
|
|
1108
|
+
"id": "cka",
|
|
1109
|
+
"title": "CKA — Certified Kubernetes Administrator",
|
|
1110
|
+
"slug": "cka",
|
|
1111
|
+
"description": "Practice exam for CKA — 20 scenario-based questions covering all 5 domains",
|
|
1112
|
+
"icon": "award",
|
|
1113
|
+
"provider": "CNCF",
|
|
1114
|
+
"level": "Intermediate",
|
|
1115
|
+
"duration_minutes": 30,
|
|
1116
|
+
"passing_score": 66,
|
|
1117
|
+
"questions_count": 20,
|
|
1118
|
+
"tags": [
|
|
1119
|
+
"Kubernetes",
|
|
1120
|
+
"CKA",
|
|
1121
|
+
"CNCF",
|
|
1122
|
+
"DevOps",
|
|
1123
|
+
"Administration"
|
|
1124
|
+
],
|
|
1125
|
+
"series_slug": "luyen-thi-cka",
|
|
1126
|
+
"domains": [
|
|
1127
|
+
{
|
|
1128
|
+
"name": "Domain 1: Cluster Architecture, Installation & Configuration",
|
|
1129
|
+
"weight": 25,
|
|
1130
|
+
"lessons": [
|
|
1131
|
+
{
|
|
1132
|
+
"title": "Bài 1: Kubernetes Architecture & kubeadm",
|
|
1133
|
+
"slug": "01-kien-truc-cka-kubeadm"
|
|
1134
|
+
},
|
|
1135
|
+
{
|
|
1136
|
+
"title": "Bài 2: Cluster Upgrade",
|
|
1137
|
+
"slug": "02-cluster-upgrade-kubeadm"
|
|
1138
|
+
},
|
|
1139
|
+
{
|
|
1140
|
+
"title": "Bài 3: RBAC",
|
|
1141
|
+
"slug": "03-rbac-cka"
|
|
1142
|
+
}
|
|
1143
|
+
]
|
|
1144
|
+
},
|
|
1145
|
+
{
|
|
1146
|
+
"name": "Domain 2: Workloads & Scheduling",
|
|
1147
|
+
"weight": 15,
|
|
1148
|
+
"lessons": [
|
|
1149
|
+
{
|
|
1150
|
+
"title": "Bài 4: Deployments, DaemonSets & StatefulSets",
|
|
1151
|
+
"slug": "04-deployments-daemonsets-statefulsets"
|
|
1152
|
+
},
|
|
1153
|
+
{
|
|
1154
|
+
"title": "Bài 5: Scheduling — Taints, Tolerations & Affinity",
|
|
1155
|
+
"slug": "05-scheduling-taints-affinity"
|
|
1156
|
+
}
|
|
1157
|
+
]
|
|
1158
|
+
},
|
|
1159
|
+
{
|
|
1160
|
+
"name": "Domain 3: Services & Networking",
|
|
1161
|
+
"weight": 20,
|
|
1162
|
+
"lessons": [
|
|
1163
|
+
{
|
|
1164
|
+
"title": "Bài 6: Services, Endpoints & CoreDNS",
|
|
1165
|
+
"slug": "06-services-endpoints-coredns"
|
|
1166
|
+
},
|
|
1167
|
+
{
|
|
1168
|
+
"title": "Bài 7: Ingress, Network Policies & CNI",
|
|
1169
|
+
"slug": "07-ingress-networkpolicies-cni"
|
|
1170
|
+
}
|
|
1171
|
+
]
|
|
1172
|
+
},
|
|
1173
|
+
{
|
|
1174
|
+
"name": "Domain 4: Storage",
|
|
1175
|
+
"weight": 10,
|
|
1176
|
+
"lessons": [
|
|
1177
|
+
{
|
|
1178
|
+
"title": "Bài 8: Persistent Volumes, PVCs & StorageClass",
|
|
1179
|
+
"slug": "08-persistent-volumes-storageclass"
|
|
1180
|
+
}
|
|
1181
|
+
]
|
|
1182
|
+
},
|
|
1183
|
+
{
|
|
1184
|
+
"name": "Domain 5: Troubleshooting",
|
|
1185
|
+
"weight": 30,
|
|
1186
|
+
"lessons": [
|
|
1187
|
+
{
|
|
1188
|
+
"title": "Bài 9: etcd Backup & Restore",
|
|
1189
|
+
"slug": "09-etcd-backup-restore"
|
|
1190
|
+
},
|
|
1191
|
+
{
|
|
1192
|
+
"title": "Bài 10: Troubleshooting Nodes",
|
|
1193
|
+
"slug": "10-troubleshooting-nodes"
|
|
1194
|
+
},
|
|
1195
|
+
{
|
|
1196
|
+
"title": "Bài 11: Troubleshooting Workloads",
|
|
1197
|
+
"slug": "11-troubleshooting-workloads"
|
|
1198
|
+
},
|
|
1199
|
+
{
|
|
1200
|
+
"title": "Bài 12: Troubleshooting Networking & Exam Strategy",
|
|
1201
|
+
"slug": "12-troubleshooting-networking-exam"
|
|
1202
|
+
}
|
|
1203
|
+
]
|
|
1204
|
+
}
|
|
1205
|
+
],
|
|
1206
|
+
"questions": [
|
|
1207
|
+
{
|
|
1208
|
+
"id": 1,
|
|
1209
|
+
"domain": "Cluster Architecture",
|
|
1210
|
+
"question": "You need to backup etcd on the control plane node. The etcd data directory is /var/lib/etcd, CA cert at /etc/kubernetes/pki/etcd/ca.crt, server cert at /etc/kubernetes/pki/etcd/server.crt, and key at /etc/kubernetes/pki/etcd/server.key. Which command creates the snapshot?",
|
|
1211
|
+
"options": [
|
|
1212
|
+
"kubectl backup etcd --destination=/tmp/etcd-backup.db",
|
|
1213
|
+
"ETCDCTL_API=3 etcdctl snapshot save /tmp/etcd-backup.db --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key",
|
|
1214
|
+
"etcdctl snapshot save /tmp/etcd-backup.db",
|
|
1215
|
+
"kubeadm etcd backup /tmp/etcd-backup.db"
|
|
1216
|
+
],
|
|
1217
|
+
"correct": 1,
|
|
1218
|
+
"explanation": "ETCDCTL_API=3 sets the API version. The etcdctl snapshot save command requires --cacert, --cert, and --key for authentication. Without ETCDCTL_API=3, etcdctl defaults to v2 which doesn't support snapshot."
|
|
1219
|
+
},
|
|
1220
|
+
{
|
|
1221
|
+
"id": 2,
|
|
1222
|
+
"domain": "Cluster Architecture",
|
|
1223
|
+
"question": "A cluster has Kubernetes v1.30 and you need to upgrade it to v1.31. What is the correct order of operations?",
|
|
1224
|
+
"options": [
|
|
1225
|
+
"Upgrade worker nodes first, then control plane",
|
|
1226
|
+
"Upgrade all nodes simultaneously with kubeadm upgrade apply",
|
|
1227
|
+
"Upgrade control plane first, then drain and upgrade each worker node",
|
|
1228
|
+
"Upgrade etcd first, then control plane, then workers"
|
|
1229
|
+
],
|
|
1230
|
+
"correct": 2,
|
|
1231
|
+
"explanation": "Always upgrade the control plane first (kubeadm upgrade apply v1.31.x), then upgrade each worker node one at a time: drain the node, upgrade kubeadm/kubelet/kubectl, then uncordon."
|
|
1232
|
+
},
|
|
1233
|
+
{
|
|
1234
|
+
"id": 3,
|
|
1235
|
+
"domain": "Cluster Architecture",
|
|
1236
|
+
"question": "You need to create a ClusterRole 'pod-reader' that allows get, watch, list on pods, then bind it to user 'jane'. Which commands achieve this?",
|
|
1237
|
+
"options": [
|
|
1238
|
+
"kubectl create role pod-reader --verb=get,watch,list --resource=pods && kubectl create rolebinding pod-reader --role=pod-reader --user=jane",
|
|
1239
|
+
"kubectl create clusterrole pod-reader --verb=get,watch,list --resource=pods && kubectl create clusterrolebinding pod-reader --clusterrole=pod-reader --user=jane",
|
|
1240
|
+
"kubectl create rolebinding pod-reader --clusterrole=pod-reader --user=jane",
|
|
1241
|
+
"kubectl apply -f rbac.yaml"
|
|
1242
|
+
],
|
|
1243
|
+
"correct": 1,
|
|
1244
|
+
"explanation": "ClusterRole is cluster-scoped. ClusterRoleBinding binds a ClusterRole to a user cluster-wide. If you used Role + RoleBinding, jane would only have access in one namespace."
|
|
1245
|
+
},
|
|
1246
|
+
{
|
|
1247
|
+
"id": 4,
|
|
1248
|
+
"domain": "Workloads & Scheduling",
|
|
1249
|
+
"question": "A node has a taint 'key=value:NoSchedule'. How do you allow a Pod to be scheduled on this node?",
|
|
1250
|
+
"options": [
|
|
1251
|
+
"Add a nodeSelector: {key: value} to the Pod spec",
|
|
1252
|
+
"Add a toleration to the Pod spec: {key: key, operator: Equal, value: value, effect: NoSchedule}",
|
|
1253
|
+
"Add node affinity with requiredDuringScheduling",
|
|
1254
|
+
"Remove the taint before scheduling"
|
|
1255
|
+
],
|
|
1256
|
+
"correct": 1,
|
|
1257
|
+
"explanation": "Tolerations allow Pods to be scheduled on nodes with matching taints. The toleration must match the taint key, value, and effect. Without the toleration, the scheduler will skip this node."
|
|
1258
|
+
},
|
|
1259
|
+
{
|
|
1260
|
+
"id": 5,
|
|
1261
|
+
"domain": "Workloads & Scheduling",
|
|
1262
|
+
"question": "You want to ensure a Pod is ONLY scheduled on nodes labeled 'disktype=ssd'. Which scheduling feature should you use?",
|
|
1263
|
+
"options": [
|
|
1264
|
+
"Taints and Tolerations",
|
|
1265
|
+
"Pod Anti-affinity",
|
|
1266
|
+
"Node Affinity (requiredDuringSchedulingIgnoredDuringExecution)",
|
|
1267
|
+
"nodeSelector is sufficient for strict requirements"
|
|
1268
|
+
],
|
|
1269
|
+
"correct": 2,
|
|
1270
|
+
"explanation": "Node Affinity with requiredDuringSchedulingIgnoredDuringExecution enforces strict node selection. nodeSelector works too, but Node Affinity provides richer expressions (In, NotIn, Exists, etc.)."
|
|
1271
|
+
},
|
|
1272
|
+
{
|
|
1273
|
+
"id": 6,
|
|
1274
|
+
"domain": "Services & Networking",
|
|
1275
|
+
"question": "A Pod cannot resolve the DNS name 'my-service.default.svc.cluster.local'. CoreDNS pods are running. What should you check FIRST?",
|
|
1276
|
+
"options": [
|
|
1277
|
+
"Check if the Service exists and its selector matches the Pod labels",
|
|
1278
|
+
"Restart the kubelet on all nodes",
|
|
1279
|
+
"Check if the Pod has network access",
|
|
1280
|
+
"Delete and recreate the Service"
|
|
1281
|
+
],
|
|
1282
|
+
"correct": 0,
|
|
1283
|
+
"explanation": "First verify the Service exists (kubectl get svc my-service) and its selector matches the target Pods (kubectl get endpoints my-service). If Endpoints are empty, the selector doesn't match any Pods."
|
|
1284
|
+
},
|
|
1285
|
+
{
|
|
1286
|
+
"id": 7,
|
|
1287
|
+
"domain": "Services & Networking",
|
|
1288
|
+
"question": "You need to create a NetworkPolicy that denies ALL ingress traffic to Pods in namespace 'prod'. Which spec achieves this?",
|
|
1289
|
+
"options": [
|
|
1290
|
+
"spec: {podSelector: {}, policyTypes: [Ingress], ingress: [{}]}",
|
|
1291
|
+
"spec: {podSelector: {}, policyTypes: [Ingress]}",
|
|
1292
|
+
"spec: {podSelector: {matchLabels: {}}, policyTypes: [Deny]}",
|
|
1293
|
+
"spec: {podSelector: null, policyTypes: [Ingress], ingress: []}"
|
|
1294
|
+
],
|
|
1295
|
+
"correct": 1,
|
|
1296
|
+
"explanation": "An empty podSelector ({}) selects ALL Pods in the namespace. Specifying policyTypes: [Ingress] without any ingress rules creates a default deny for all ingress traffic."
|
|
1297
|
+
},
|
|
1298
|
+
{
|
|
1299
|
+
"id": 8,
|
|
1300
|
+
"domain": "Storage",
|
|
1301
|
+
"question": "A PersistentVolumeClaim is stuck in 'Pending' state. What is the MOST likely cause?",
|
|
1302
|
+
"options": [
|
|
1303
|
+
"The Pod using the PVC is not running",
|
|
1304
|
+
"No PersistentVolume matches the PVC's storage class, access mode, or capacity requirements",
|
|
1305
|
+
"The namespace doesn't exist",
|
|
1306
|
+
"kubectl was not used to create the PVC"
|
|
1307
|
+
],
|
|
1308
|
+
"correct": 1,
|
|
1309
|
+
"explanation": "PVC Pending means no PV matched the binding criteria: storageClassName, accessModes, and requested storage capacity. Check with kubectl describe pvc to see the binding failure reason."
|
|
1310
|
+
},
|
|
1311
|
+
{
|
|
1312
|
+
"id": 9,
|
|
1313
|
+
"domain": "Storage",
|
|
1314
|
+
"question": "Which reclaim policy ensures a PersistentVolume is deleted when its claim is released?",
|
|
1315
|
+
"options": [
|
|
1316
|
+
"Retain",
|
|
1317
|
+
"Recycle",
|
|
1318
|
+
"Delete",
|
|
1319
|
+
"Release"
|
|
1320
|
+
],
|
|
1321
|
+
"correct": 2,
|
|
1322
|
+
"explanation": "Delete policy automatically deletes the PV and its associated storage asset when the PVC is deleted. Retain keeps the PV data for manual recovery. Recycle is deprecated."
|
|
1323
|
+
},
|
|
1324
|
+
{
|
|
1325
|
+
"id": 10,
|
|
1326
|
+
"domain": "Troubleshooting",
|
|
1327
|
+
"question": "A Node shows status 'NotReady'. Which command gives you the MOST useful diagnostic information?",
|
|
1328
|
+
"options": [
|
|
1329
|
+
"kubectl get node <node-name>",
|
|
1330
|
+
"kubectl describe node <node-name> and check Events/Conditions sections",
|
|
1331
|
+
"kubectl logs <node-name>",
|
|
1332
|
+
"kubectl get pods --all-namespaces"
|
|
1333
|
+
],
|
|
1334
|
+
"correct": 1,
|
|
1335
|
+
"explanation": "kubectl describe node shows Conditions (DiskPressure, MemoryPressure, NetworkUnavailable, Ready), resource usage, and Events. On the node itself, check: systemctl status kubelet and journalctl -u kubelet."
|
|
1336
|
+
},
|
|
1337
|
+
{
|
|
1338
|
+
"id": 11,
|
|
1339
|
+
"domain": "Troubleshooting",
|
|
1340
|
+
"question": "A Pod is in 'CrashLoopBackOff'. Which command shows the logs from the PREVIOUS container instance?",
|
|
1341
|
+
"options": [
|
|
1342
|
+
"kubectl logs <pod-name>",
|
|
1343
|
+
"kubectl logs <pod-name> --previous",
|
|
1344
|
+
"kubectl describe pod <pod-name>",
|
|
1345
|
+
"kubectl logs <pod-name> --all"
|
|
1346
|
+
],
|
|
1347
|
+
"correct": 1,
|
|
1348
|
+
"explanation": "kubectl logs --previous (or -p) shows the logs from the previously terminated container instance, which is crucial for diagnosing why a container crashed."
|
|
1349
|
+
},
|
|
1350
|
+
{
|
|
1351
|
+
"id": 12,
|
|
1352
|
+
"domain": "Troubleshooting",
|
|
1353
|
+
"question": "A Pod is stuck in 'ImagePullBackOff'. What is the MOST common cause?",
|
|
1354
|
+
"options": [
|
|
1355
|
+
"Insufficient CPU resources on the node",
|
|
1356
|
+
"Wrong image name/tag or missing image pull secret for private registry",
|
|
1357
|
+
"Node affinity constraints not met",
|
|
1358
|
+
"NetworkPolicy blocking traffic"
|
|
1359
|
+
],
|
|
1360
|
+
"correct": 1,
|
|
1361
|
+
"explanation": "ImagePullBackOff is caused by: (1) incorrect image name/tag, (2) missing or incorrect imagePullSecret for private registries, (3) registry is unreachable. Check kubectl describe pod for the exact error."
|
|
1362
|
+
},
|
|
1363
|
+
{
|
|
1364
|
+
"id": 13,
|
|
1365
|
+
"domain": "Troubleshooting",
|
|
1366
|
+
"question": "After upgrading a cluster, several Pods show 'OOMKilled'. What is the immediate remediation?",
|
|
1367
|
+
"options": [
|
|
1368
|
+
"Increase the namespace ResourceQuota",
|
|
1369
|
+
"Increase the Pod's memory limit or add more nodes to the cluster",
|
|
1370
|
+
"Restart the kubelet service",
|
|
1371
|
+
"Delete and recreate the Pods"
|
|
1372
|
+
],
|
|
1373
|
+
"correct": 1,
|
|
1374
|
+
"explanation": "OOMKilled means the container exceeded its memory limit. Fix: increase the memory limit in the Pod spec, or add more nodes if the node is under memory pressure. Also investigate for memory leaks."
|
|
1375
|
+
},
|
|
1376
|
+
{
|
|
1377
|
+
"id": 14,
|
|
1378
|
+
"domain": "Troubleshooting",
|
|
1379
|
+
"question": "How do you run a temporary debug pod in a namespace to test network connectivity?",
|
|
1380
|
+
"options": [
|
|
1381
|
+
"kubectl create pod debug --image=busybox",
|
|
1382
|
+
"kubectl run debug --image=busybox --rm -it -- sh",
|
|
1383
|
+
"kubectl exec <existing-pod> -- sh",
|
|
1384
|
+
"kubectl debug node/<node-name>"
|
|
1385
|
+
],
|
|
1386
|
+
"correct": 1,
|
|
1387
|
+
"explanation": "kubectl run with --rm (auto-delete) and -it (interactive terminal) creates a temporary Pod for debugging. The -- sh runs a shell. Use --restart=Never to prevent it from being rescheduled."
|
|
1388
|
+
},
|
|
1389
|
+
{
|
|
1390
|
+
"id": 15,
|
|
1391
|
+
"domain": "Cluster Architecture",
|
|
1392
|
+
"question": "Which file does kubeadm use to store cluster configuration after initialization?",
|
|
1393
|
+
"options": [
|
|
1394
|
+
"/etc/kubernetes/admin.conf",
|
|
1395
|
+
"ConfigMap 'kubeadm-config' in kube-system namespace",
|
|
1396
|
+
"/var/lib/kubelet/config.yaml",
|
|
1397
|
+
"/etc/kubernetes/manifests/kube-apiserver.yaml"
|
|
1398
|
+
],
|
|
1399
|
+
"correct": 1,
|
|
1400
|
+
"explanation": "kubeadm stores its configuration in a ConfigMap named 'kubeadm-config' in the kube-system namespace. This is used during upgrades: kubeadm reads this config with kubeadm upgrade plan."
|
|
1401
|
+
},
|
|
1402
|
+
{
|
|
1403
|
+
"id": 16,
|
|
1404
|
+
"domain": "Workloads & Scheduling",
|
|
1405
|
+
"question": "You need to deploy an app with the strongest protection against eviction under memory pressure. Which QoS class achieves this?",
|
|
1406
|
+
"options": [
|
|
1407
|
+
"BestEffort",
|
|
1408
|
+
"Burstable",
|
|
1409
|
+
"Guaranteed",
|
|
1410
|
+
"Priority"
|
|
1411
|
+
],
|
|
1412
|
+
"correct": 2,
|
|
1413
|
+
"explanation": "Guaranteed QoS requires memory requests == memory limits (and CPU too). These Pods are evicted last. BestEffort (no requests/limits) is evicted first. Burstable is in between."
|
|
1414
|
+
},
|
|
1415
|
+
{
|
|
1416
|
+
"id": 17,
|
|
1417
|
+
"domain": "Services & Networking",
|
|
1418
|
+
"question": "Which command quickly tests if kube-proxy iptables rules are working for a Service named 'web' in the default namespace?",
|
|
1419
|
+
"options": [
|
|
1420
|
+
"kubectl exec <pod> -- curl web.default.svc.cluster.local",
|
|
1421
|
+
"kubectl describe service web",
|
|
1422
|
+
"iptables -L -n | grep web",
|
|
1423
|
+
"kubectl get endpoints web"
|
|
1424
|
+
],
|
|
1425
|
+
"correct": 0,
|
|
1426
|
+
"explanation": "Running curl from inside a Pod against the Service DNS name tests the full networking path: DNS resolution → kube-proxy iptables → Pod endpoint. If DNS resolves but curl fails, it may be a kube-proxy or NetworkPolicy issue."
|
|
1427
|
+
},
|
|
1428
|
+
{
|
|
1429
|
+
"id": 18,
|
|
1430
|
+
"domain": "Storage",
|
|
1431
|
+
"question": "A StatefulSet needs each Pod to have its own PersistentVolumeClaim. How is this configured?",
|
|
1432
|
+
"options": [
|
|
1433
|
+
"Create individual PVCs manually and reference them by name in each Pod",
|
|
1434
|
+
"Use volumeClaimTemplates in the StatefulSet spec",
|
|
1435
|
+
"Use a shared ReadWriteMany PVC",
|
|
1436
|
+
"Configure hostPath volumes instead"
|
|
1437
|
+
],
|
|
1438
|
+
"correct": 1,
|
|
1439
|
+
"explanation": "volumeClaimTemplates in a StatefulSet spec automatically creates a PVC for each Pod replica. The PVC name follows the pattern <template-name>-<pod-name>. These PVCs are NOT deleted when the StatefulSet is deleted."
|
|
1440
|
+
},
|
|
1441
|
+
{
|
|
1442
|
+
"id": 19,
|
|
1443
|
+
"domain": "Cluster Architecture",
|
|
1444
|
+
"question": "What happens to running Pods when a node is drained with 'kubectl drain <node> --ignore-daemonsets'?",
|
|
1445
|
+
"options": [
|
|
1446
|
+
"Pods are deleted immediately without rescheduling",
|
|
1447
|
+
"Pods are evicted and rescheduled on other available nodes",
|
|
1448
|
+
"Pods continue running but no new Pods are scheduled",
|
|
1449
|
+
"Pods are paused until the node is uncordoned"
|
|
1450
|
+
],
|
|
1451
|
+
"correct": 1,
|
|
1452
|
+
"explanation": "kubectl drain cordons the node (prevents new scheduling) then gracefully evicts existing Pods. The Pod controllers (Deployments, etc.) reschedule the Pods on other nodes. DaemonSet Pods are ignored with the flag."
|
|
1453
|
+
},
|
|
1454
|
+
{
|
|
1455
|
+
"id": 20,
|
|
1456
|
+
"domain": "Troubleshooting",
|
|
1457
|
+
"question": "A Deployment has 3 replicas but only 1 Pod is running. The other 2 are Pending. What should you check?",
|
|
1458
|
+
"options": [
|
|
1459
|
+
"Check if the Deployment manifest has errors",
|
|
1460
|
+
"Check node resources (kubectl describe node), Pod events, and any PodDisruptionBudgets",
|
|
1461
|
+
"Check if the namespace exists",
|
|
1462
|
+
"Restart the kube-controller-manager"
|
|
1463
|
+
],
|
|
1464
|
+
"correct": 1,
|
|
1465
|
+
"explanation": "Pending Pods mean the scheduler can't place them. Check: (1) kubectl describe pod for Events (Insufficient CPU/memory, taint not tolerated), (2) kubectl describe node for available capacity, (3) any resource quotas blocking scheduling."
|
|
1466
|
+
}
|
|
1467
|
+
]
|
|
1468
|
+
},
|
|
1469
|
+
{
|
|
1470
|
+
"id": "ckad",
|
|
1471
|
+
"title": "CKAD — Certified Kubernetes Application Developer",
|
|
1472
|
+
"slug": "ckad",
|
|
1473
|
+
"description": "Practice exam for CKAD — 20 scenario-based questions covering all 5 domains",
|
|
1474
|
+
"icon": "award",
|
|
1475
|
+
"provider": "CNCF",
|
|
1476
|
+
"level": "Intermediate",
|
|
1477
|
+
"duration_minutes": 30,
|
|
1478
|
+
"passing_score": 66,
|
|
1479
|
+
"questions_count": 20,
|
|
1480
|
+
"tags": [
|
|
1481
|
+
"Kubernetes",
|
|
1482
|
+
"CKAD",
|
|
1483
|
+
"CNCF",
|
|
1484
|
+
"DevOps",
|
|
1485
|
+
"Development"
|
|
1486
|
+
],
|
|
1487
|
+
"series_slug": "luyen-thi-ckad",
|
|
1488
|
+
"domains": [
|
|
1489
|
+
{
|
|
1490
|
+
"name": "Domain 1: Application Design and Build",
|
|
1491
|
+
"weight": 20,
|
|
1492
|
+
"lessons": [
|
|
1493
|
+
{
|
|
1494
|
+
"title": "Bài 1: Multi-container Pods & Init Containers",
|
|
1495
|
+
"slug": "01-multi-container-pods"
|
|
1496
|
+
},
|
|
1497
|
+
{
|
|
1498
|
+
"title": "Bài 2: Jobs, CronJobs & Resources",
|
|
1499
|
+
"slug": "02-jobs-cronjobs-resources"
|
|
1500
|
+
}
|
|
1501
|
+
]
|
|
1502
|
+
},
|
|
1503
|
+
{
|
|
1504
|
+
"name": "Domain 2: Application Deployment",
|
|
1505
|
+
"weight": 20,
|
|
1506
|
+
"lessons": [
|
|
1507
|
+
{
|
|
1508
|
+
"title": "Bài 3: Rolling Updates, Rollbacks & Strategies",
|
|
1509
|
+
"slug": "03-rolling-updates-rollbacks"
|
|
1510
|
+
},
|
|
1511
|
+
{
|
|
1512
|
+
"title": "Bài 4: Helm & Kustomize",
|
|
1513
|
+
"slug": "04-helm-kustomize"
|
|
1514
|
+
}
|
|
1515
|
+
]
|
|
1516
|
+
},
|
|
1517
|
+
{
|
|
1518
|
+
"name": "Domain 3: Application Observability and Maintenance",
|
|
1519
|
+
"weight": 15,
|
|
1520
|
+
"lessons": [
|
|
1521
|
+
{
|
|
1522
|
+
"title": "Bài 5: Probes, Logging & Debugging",
|
|
1523
|
+
"slug": "05-probes-logging-debugging"
|
|
1524
|
+
}
|
|
1525
|
+
]
|
|
1526
|
+
},
|
|
1527
|
+
{
|
|
1528
|
+
"name": "Domain 4: Application Environment, Configuration & Security",
|
|
1529
|
+
"weight": 25,
|
|
1530
|
+
"lessons": [
|
|
1531
|
+
{
|
|
1532
|
+
"title": "Bài 6: ConfigMaps & Secrets",
|
|
1533
|
+
"slug": "06-configmaps-secrets"
|
|
1534
|
+
},
|
|
1535
|
+
{
|
|
1536
|
+
"title": "Bài 7: SecurityContext & Pod Security",
|
|
1537
|
+
"slug": "07-securitycontext-pod-security"
|
|
1538
|
+
},
|
|
1539
|
+
{
|
|
1540
|
+
"title": "Bài 8: Resource Requests, Limits & QoS",
|
|
1541
|
+
"slug": "08-resources-qos"
|
|
1542
|
+
}
|
|
1543
|
+
]
|
|
1544
|
+
},
|
|
1545
|
+
{
|
|
1546
|
+
"name": "Domain 5: Services & Networking",
|
|
1547
|
+
"weight": 20,
|
|
1548
|
+
"lessons": [
|
|
1549
|
+
{
|
|
1550
|
+
"title": "Bài 9: Services & Ingress",
|
|
1551
|
+
"slug": "09-services-ingress"
|
|
1552
|
+
},
|
|
1553
|
+
{
|
|
1554
|
+
"title": "Bài 10: Network Policies & Exam Strategy",
|
|
1555
|
+
"slug": "10-networkpolicies-exam-strategy"
|
|
1556
|
+
}
|
|
1557
|
+
]
|
|
1558
|
+
}
|
|
1559
|
+
],
|
|
1560
|
+
"questions": [
|
|
1561
|
+
{
|
|
1562
|
+
"id": 1,
|
|
1563
|
+
"domain": "App Design and Build",
|
|
1564
|
+
"question": "A Pod needs to wait for a database to be ready before starting the main application container. Which Kubernetes feature should you use?",
|
|
1565
|
+
"options": [
|
|
1566
|
+
"Liveness probe",
|
|
1567
|
+
"Init container",
|
|
1568
|
+
"Readiness probe",
|
|
1569
|
+
"Sidecar container"
|
|
1570
|
+
],
|
|
1571
|
+
"correct": 1,
|
|
1572
|
+
"explanation": "Init containers run to completion before app containers start. Use an init container to check database connectivity (e.g., nslookup or nc -z db-host 5432). Only after it succeeds will the main container start."
|
|
1573
|
+
},
|
|
1574
|
+
{
|
|
1575
|
+
"id": 2,
|
|
1576
|
+
"domain": "App Design and Build",
|
|
1577
|
+
"question": "Which sidecar pattern is used to translate the communication protocol of the main container to match external services?",
|
|
1578
|
+
"options": [
|
|
1579
|
+
"Sidecar",
|
|
1580
|
+
"Ambassador",
|
|
1581
|
+
"Adapter",
|
|
1582
|
+
"Init"
|
|
1583
|
+
],
|
|
1584
|
+
"correct": 2,
|
|
1585
|
+
"explanation": "The Adapter pattern transforms the output of the main container to match a standard format expected by external systems (e.g., converting proprietary metrics to Prometheus format). Ambassador is for proxying outbound connections."
|
|
1586
|
+
},
|
|
1587
|
+
{
|
|
1588
|
+
"id": 3,
|
|
1589
|
+
"domain": "App Design and Build",
|
|
1590
|
+
"question": "You need to run a batch job that should retry up to 4 times on failure. Which Job spec fields control this?",
|
|
1591
|
+
"options": [
|
|
1592
|
+
"retries: 4 and failurePolicy: retry",
|
|
1593
|
+
"backoffLimit: 4",
|
|
1594
|
+
"completions: 4 and parallelism: 1",
|
|
1595
|
+
"restartPolicy: OnFailure and maxRetries: 4"
|
|
1596
|
+
],
|
|
1597
|
+
"correct": 1,
|
|
1598
|
+
"explanation": "backoffLimit specifies the number of retries before considering a Job failed. Default is 6. The restartPolicy should be Never or OnFailure. completions controls how many successful completions are needed."
|
|
1599
|
+
},
|
|
1600
|
+
{
|
|
1601
|
+
"id": 4,
|
|
1602
|
+
"domain": "App Deployment",
|
|
1603
|
+
"question": "A Deployment is performing a rolling update. You spot a bug and need to stop it and go back to the previous version. Which commands achieve this?",
|
|
1604
|
+
"options": [
|
|
1605
|
+
"kubectl delete deployment <name> && kubectl apply -f old-deployment.yaml",
|
|
1606
|
+
"kubectl rollout pause <deployment> && kubectl rollout undo deployment/<name>",
|
|
1607
|
+
"kubectl rollout undo deployment/<name>",
|
|
1608
|
+
"kubectl set image deployment/<name> container=old-image"
|
|
1609
|
+
],
|
|
1610
|
+
"correct": 2,
|
|
1611
|
+
"explanation": "kubectl rollout undo deployment/<name> reverts to the previous revision. You can also specify --to-revision=N to go to a specific version. Check history with kubectl rollout history deployment/<name>."
|
|
1612
|
+
},
|
|
1613
|
+
{
|
|
1614
|
+
"id": 5,
|
|
1615
|
+
"domain": "App Deployment",
|
|
1616
|
+
"question": "In a RollingUpdate Deployment strategy, what does 'maxUnavailable: 1' mean?",
|
|
1617
|
+
"options": [
|
|
1618
|
+
"Maximum 1 Pod can be created above the desired replicas during update",
|
|
1619
|
+
"Maximum 1 Pod can be unavailable at any time during the update",
|
|
1620
|
+
"The update fails if more than 1 Pod is unavailable",
|
|
1621
|
+
"Maximum 1 node can be updated at a time"
|
|
1622
|
+
],
|
|
1623
|
+
"correct": 1,
|
|
1624
|
+
"explanation": "maxUnavailable controls how many Pods can be down below the desired count during rolling updates. maxSurge controls how many extra Pods can be created above desired. These balance update speed vs availability."
|
|
1625
|
+
},
|
|
1626
|
+
{
|
|
1627
|
+
"id": 6,
|
|
1628
|
+
"domain": "App Observability",
|
|
1629
|
+
"question": "A container is running but not serving traffic. The Pod is Ready=False. Which probe is MOST likely failing?",
|
|
1630
|
+
"options": [
|
|
1631
|
+
"Startup probe",
|
|
1632
|
+
"Liveness probe",
|
|
1633
|
+
"Readiness probe",
|
|
1634
|
+
"Health probe"
|
|
1635
|
+
],
|
|
1636
|
+
"correct": 2,
|
|
1637
|
+
"explanation": "Readiness probe determines if a container is ready to accept traffic. A failing readiness probe removes the Pod from Service endpoints. Liveness probe triggers container restart. Startup probe guards slow-starting containers."
|
|
1638
|
+
},
|
|
1639
|
+
{
|
|
1640
|
+
"id": 7,
|
|
1641
|
+
"domain": "App Observability",
|
|
1642
|
+
"question": "A container keeps restarting. The liveness probe is configured with initialDelaySeconds: 5 but the app takes 30s to start. What is the fix?",
|
|
1643
|
+
"options": [
|
|
1644
|
+
"Increase the failureThreshold to 10",
|
|
1645
|
+
"Use a startup probe with failureThreshold * periodSeconds > 30s, or increase initialDelaySeconds to 35",
|
|
1646
|
+
"Disable the liveness probe",
|
|
1647
|
+
"Reduce the app startup time"
|
|
1648
|
+
],
|
|
1649
|
+
"correct": 1,
|
|
1650
|
+
"explanation": "Use a startup probe: it protects slow-starting containers by allowing more time only at startup. Once the startup probe succeeds, the liveness probe takes over. Alternatively, initialDelaySeconds >= 35 would also work but startup probe is cleaner."
|
|
1651
|
+
},
|
|
1652
|
+
{
|
|
1653
|
+
"id": 8,
|
|
1654
|
+
"domain": "App Environment, Config & Security",
|
|
1655
|
+
"question": "You have a Secret named 'db-secret' with key 'password'. How do you inject it as an environment variable 'DB_PASSWORD' in a Pod?",
|
|
1656
|
+
"options": [
|
|
1657
|
+
"env: [{name: DB_PASSWORD, value: $(db-secret.password)}]",
|
|
1658
|
+
"env: [{name: DB_PASSWORD, valueFrom: {secretKeyRef: {name: db-secret, key: password}}}]",
|
|
1659
|
+
"envFrom: [{secretRef: {name: db-secret}}]",
|
|
1660
|
+
"env: [{name: DB_PASSWORD, secret: db-secret}]"
|
|
1661
|
+
],
|
|
1662
|
+
"correct": 1,
|
|
1663
|
+
"explanation": "valueFrom.secretKeyRef injects a specific key from a Secret as an env var. envFrom.secretRef injects ALL keys from the Secret as env vars. The first creates DB_PASSWORD specifically from the Secret's 'password' key."
|
|
1664
|
+
},
|
|
1665
|
+
{
|
|
1666
|
+
"id": 9,
|
|
1667
|
+
"domain": "App Environment, Config & Security",
|
|
1668
|
+
"question": "A container must run as user ID 1000 and cannot write to the root filesystem. Which SecurityContext settings achieve this?",
|
|
1669
|
+
"options": [
|
|
1670
|
+
"runAsUser: 1000, readOnlyRootFilesystem: true",
|
|
1671
|
+
"userId: 1000, immutableFilesystem: true",
|
|
1672
|
+
"runAsNonRoot: true, disableWrite: true",
|
|
1673
|
+
"securityContext: {user: 1000, readonly: true}"
|
|
1674
|
+
],
|
|
1675
|
+
"correct": 0,
|
|
1676
|
+
"explanation": "runAsUser: 1000 sets the UID. readOnlyRootFilesystem: true prevents any writes to the container filesystem (use volume mounts for writable paths). These are container-level SecurityContext fields."
|
|
1677
|
+
},
|
|
1678
|
+
{
|
|
1679
|
+
"id": 10,
|
|
1680
|
+
"domain": "App Environment, Config & Security",
|
|
1681
|
+
"question": "Which Pod Security Standard level prevents privilege escalation and restricts capabilities but is less restrictive than Restricted?",
|
|
1682
|
+
"options": [
|
|
1683
|
+
"Privileged",
|
|
1684
|
+
"Baseline",
|
|
1685
|
+
"Restricted",
|
|
1686
|
+
"Limited"
|
|
1687
|
+
],
|
|
1688
|
+
"correct": 1,
|
|
1689
|
+
"explanation": "Baseline prevents known privilege escalations while allowing the default Docker/container configuration. Restricted is the most hardened. Privileged allows everything. Apply with: kubectl label namespace <ns> pod-security.kubernetes.io/enforce=baseline"
|
|
1690
|
+
},
|
|
1691
|
+
{
|
|
1692
|
+
"id": 11,
|
|
1693
|
+
"domain": "App Environment, Config & Security",
|
|
1694
|
+
"question": "A Pod requests 500m CPU but has no limit set. Which QoS class is assigned?",
|
|
1695
|
+
"options": [
|
|
1696
|
+
"Guaranteed",
|
|
1697
|
+
"Burstable",
|
|
1698
|
+
"BestEffort",
|
|
1699
|
+
"Limited"
|
|
1700
|
+
],
|
|
1701
|
+
"correct": 1,
|
|
1702
|
+
"explanation": "Burstable: at least one container has CPU or memory request/limit set, but not all (or requests != limits). BestEffort: no requests or limits at all. Guaranteed: all containers have requests == limits for both CPU and memory."
|
|
1703
|
+
},
|
|
1704
|
+
{
|
|
1705
|
+
"id": 12,
|
|
1706
|
+
"domain": "Services & Networking",
|
|
1707
|
+
"question": "You need to expose a Deployment externally on port 80 and route to container port 8080. Which Service spec is correct?",
|
|
1708
|
+
"options": [
|
|
1709
|
+
"type: NodePort, port: 80, targetPort: 8080",
|
|
1710
|
+
"type: ClusterIP, port: 8080, targetPort: 80",
|
|
1711
|
+
"type: LoadBalancer, port: 80, targetPort: 8080",
|
|
1712
|
+
"type: ExternalName, port: 80"
|
|
1713
|
+
],
|
|
1714
|
+
"correct": 2,
|
|
1715
|
+
"explanation": "LoadBalancer exposes the Service externally. port: 80 is the Service port that clients connect to. targetPort: 8080 is the container port traffic is forwarded to. NodePort would also work but exposes a random high port."
|
|
1716
|
+
},
|
|
1717
|
+
{
|
|
1718
|
+
"id": 13,
|
|
1719
|
+
"domain": "Services & Networking",
|
|
1720
|
+
"question": "An Ingress resource is created but returns 404 for all paths. What should you check FIRST?",
|
|
1721
|
+
"options": [
|
|
1722
|
+
"Whether the Service backend exists and has endpoints",
|
|
1723
|
+
"Whether the node has enough CPU",
|
|
1724
|
+
"Whether the Ingress controller is installed and running",
|
|
1725
|
+
"Whether TLS certificates are valid"
|
|
1726
|
+
],
|
|
1727
|
+
"correct": 2,
|
|
1728
|
+
"explanation": "Without an Ingress controller, Ingress resources have no effect. Check: kubectl get pods -n ingress-nginx (or your controller namespace). Then verify the IngressClass matches your Ingress spec's ingressClassName."
|
|
1729
|
+
},
|
|
1730
|
+
{
|
|
1731
|
+
"id": 14,
|
|
1732
|
+
"domain": "Services & Networking",
|
|
1733
|
+
"question": "You need to create a NetworkPolicy that allows only Pods with label 'role=frontend' to access Pods with label 'role=backend' on port 5432. Which spec is correct?",
|
|
1734
|
+
"options": [
|
|
1735
|
+
"podSelector: {role: backend}, ingress: [{from: [{podSelector: {matchLabels: {role: frontend}}}], ports: [{port: 5432}]}]",
|
|
1736
|
+
"All traffic is allowed by default; no NetworkPolicy needed",
|
|
1737
|
+
"podSelector: {role: frontend}, egress: [{to: [{podSelector: {matchLabels: {role: backend}}}]}]",
|
|
1738
|
+
"Create two NetworkPolicies: one for ingress on backend, one for egress on frontend"
|
|
1739
|
+
],
|
|
1740
|
+
"correct": 0,
|
|
1741
|
+
"explanation": "The NetworkPolicy selects backend Pods (podSelector), then defines ingress rules allowing traffic from frontend Pods on port 5432. This is applied in the same namespace as the backend Pods."
|
|
1742
|
+
},
|
|
1743
|
+
{
|
|
1744
|
+
"id": 15,
|
|
1745
|
+
"domain": "App Design and Build",
|
|
1746
|
+
"question": "A CronJob schedule '0 2 * * *' will run the job at what time?",
|
|
1747
|
+
"options": [
|
|
1748
|
+
"Every 2 minutes",
|
|
1749
|
+
"Every 2 hours",
|
|
1750
|
+
"At 2:00 AM every day",
|
|
1751
|
+
"At 2:00 AM every Monday"
|
|
1752
|
+
],
|
|
1753
|
+
"correct": 2,
|
|
1754
|
+
"explanation": "Cron format: minute hour day-of-month month day-of-week. '0 2 * * *' = minute 0, hour 2, any day/month/weekday = 2:00 AM daily. Use crontab.guru to verify cron expressions."
|
|
1755
|
+
},
|
|
1756
|
+
{
|
|
1757
|
+
"id": 16,
|
|
1758
|
+
"domain": "App Deployment",
|
|
1759
|
+
"question": "What is the difference between 'helm upgrade' and 'helm upgrade --install'?",
|
|
1760
|
+
"options": [
|
|
1761
|
+
"No difference, both commands are identical",
|
|
1762
|
+
"--install creates the release if it doesn't exist, upgrade only updates existing releases",
|
|
1763
|
+
"--install skips the upgrade and only installs fresh",
|
|
1764
|
+
"--install uses the latest chart version automatically"
|
|
1765
|
+
],
|
|
1766
|
+
"correct": 1,
|
|
1767
|
+
"explanation": "helm upgrade requires the release to already exist. helm upgrade --install is idempotent: it installs if the release doesn't exist, upgrades if it does. This is ideal for CI/CD pipelines."
|
|
1768
|
+
},
|
|
1769
|
+
{
|
|
1770
|
+
"id": 17,
|
|
1771
|
+
"domain": "App Environment, Config & Security",
|
|
1772
|
+
"question": "A ConfigMap key 'app.conf' contains a multi-line configuration file. How do you mount it in a Pod at /etc/app/app.conf?",
|
|
1773
|
+
"options": [
|
|
1774
|
+
"Use envFrom: configMapRef",
|
|
1775
|
+
"Use a volume with configMap.items to map the key to a specific path, then volumeMount at /etc/app",
|
|
1776
|
+
"Use env.valueFrom.configMapKeyRef",
|
|
1777
|
+
"Copy the file into the container image"
|
|
1778
|
+
],
|
|
1779
|
+
"correct": 1,
|
|
1780
|
+
"explanation": "Mount a ConfigMap as a volume: volumes[].configMap.items maps specific keys to paths. Then volumeMounts mounts at /etc/app. The file will appear at /etc/app/app.conf. env injection is for env vars, not files."
|
|
1781
|
+
},
|
|
1782
|
+
{
|
|
1783
|
+
"id": 18,
|
|
1784
|
+
"domain": "App Observability",
|
|
1785
|
+
"question": "You want to check resource usage of Pods in namespace 'production'. Which command shows CPU and memory consumption?",
|
|
1786
|
+
"options": [
|
|
1787
|
+
"kubectl describe pod -n production",
|
|
1788
|
+
"kubectl top pod -n production",
|
|
1789
|
+
"kubectl get pod -n production -o wide",
|
|
1790
|
+
"kubectl logs -n production"
|
|
1791
|
+
],
|
|
1792
|
+
"correct": 1,
|
|
1793
|
+
"explanation": "kubectl top pod shows current CPU and memory usage per Pod (requires metrics-server). kubectl describe shows limits/requests but not actual usage. kubectl top node shows node-level usage."
|
|
1794
|
+
},
|
|
1795
|
+
{
|
|
1796
|
+
"id": 19,
|
|
1797
|
+
"domain": "App Design and Build",
|
|
1798
|
+
"question": "Two containers in the same Pod need to share data in memory (not persisted to disk). Which volume type achieves this?",
|
|
1799
|
+
"options": [
|
|
1800
|
+
"hostPath",
|
|
1801
|
+
"emptyDir with medium: Memory",
|
|
1802
|
+
"PersistentVolumeClaim",
|
|
1803
|
+
"configMap volume"
|
|
1804
|
+
],
|
|
1805
|
+
"correct": 1,
|
|
1806
|
+
"explanation": "emptyDir with medium: Memory creates a tmpfs (RAM-backed) volume. It's shared between all containers in the Pod and disappears when the Pod is removed. Default emptyDir uses disk storage."
|
|
1807
|
+
},
|
|
1808
|
+
{
|
|
1809
|
+
"id": 20,
|
|
1810
|
+
"domain": "Services & Networking",
|
|
1811
|
+
"question": "You need a Service that doesn't have a cluster IP and returns the IPs of individual Pods directly. Which Service type achieves this?",
|
|
1812
|
+
"options": [
|
|
1813
|
+
"ClusterIP with type: None",
|
|
1814
|
+
"Headless Service (clusterIP: None)",
|
|
1815
|
+
"NodePort",
|
|
1816
|
+
"ExternalName"
|
|
1817
|
+
],
|
|
1818
|
+
"correct": 1,
|
|
1819
|
+
"explanation": "A Headless Service (clusterIP: None) skips kube-proxy and returns Pod IPs directly via DNS. Used by StatefulSets for stable network identities and by clients that need direct Pod connections (e.g., databases, Cassandra)."
|
|
1820
|
+
}
|
|
1821
|
+
]
|
|
763
1822
|
}
|
|
764
1823
|
]
|