@xdev-asia/xdev-knowledge-mcp 1.0.51 → 1.0.53

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -28,4 +28,4 @@ Chúng tôi không bán hoặc chia sẻ thông tin cá nhân của bạn cho b
28
28
  ### 5. Cookie
29
29
  Chúng tôi sử dụng cookie để duy trì phiên đăng nhập và cải thiện trải nghiệm người dùng. Bạn có thể tắt cookie trong trình duyệt.
30
30
  ### 6. Liên hệ
31
- Nếu có câu hỏi về chính sách quyền riêng tư, vui lòng liên hệ qua email quản trị của website.
31
+ Nếu có câu hỏi về chính sách quyền riêng tư, vui lòng liên hệ qua email: **duy@xdev.asia**.
@@ -96,5 +96,5 @@ We may update this Privacy Policy from time to time. We will notify you of any c
96
96
 
97
97
  ## 11. Contact Us
98
98
  If you have any questions about this Privacy Policy, please contact us:
99
- - **Website:** [https://lms.xdev.asia](https://lms.xdev.asia)
100
- - **Email:** privacy@xdev.asia
99
+ - **Website:** [https://xdev.asia](https://xdev.asia)
100
+ - **Email:** duy@xdev.asia
@@ -33,7 +33,7 @@ Bạn có thể yêu cầu xóa toàn bộ dữ liệu cá nhân theo một tron
33
33
 
34
34
  **Cách 1: Qua email**
35
35
 
36
- Gửi email đến **<admin@xdev.asia>a>a>a>** với tiêu đề **"Yêu cầu xóa tài khoản"** và nội dung bao gồm:
36
+ Gửi email đến **duy@xdev.asia** với tiêu đề **"Yêu cầu xóa tài khoản"** và nội dung bao gồm:
37
37
 
38
38
  - Địa chỉ email đã đăng ký
39
39
  - Tên hiển thị trên tài khoản
@@ -60,9 +60,7 @@ Khi yêu cầu được chấp thuận, chúng tôi sẽ xóa:
60
60
 
61
61
  ### Liên hệ
62
62
 
63
- <admin@xdev.asia>
64
- Nếu bạn có câ<admin@xdev.asia>riêng tư hoặc xử lý dữ liệu, vui lòng liên hệ:
65
- <admin@xdev.asia>
63
+ Nếu bạn có câu hỏi về quyền riêng tư hoặc xử lý dữ liệu, vui lòng liên hệ:
66
64
 
67
- - **Email:** <admin@xdev.asia>
65
+ - **Email:** duy@xdev.asia
68
66
  - **Chính sách bảo mật:** [xdev.asia/pages/chinh-sach-quyen-rieng-tu/](/pages/chinh-sach-quyen-rieng-tu/)
@@ -0,0 +1,319 @@
1
+ {
2
+ "id": "cka",
3
+ "title": "CKA — Certified Kubernetes Administrator",
4
+ "slug": "cka",
5
+ "description": "Practice exam for CKA — 20 questions covering all 5 domains",
6
+ "icon": "award",
7
+ "provider": "CNCF",
8
+ "level": "Professional",
9
+ "duration_minutes": 45,
10
+ "passing_score": 66,
11
+ "questions_count": 20,
12
+ "tags": ["Kubernetes", "CKA", "CNCF", "DevOps", "Linux Foundation"],
13
+ "series_slug": "luyen-thi-cka",
14
+ "domains": [
15
+ {
16
+ "name": "Domain 1: Cluster Architecture, Installation & Configuration",
17
+ "weight": 25,
18
+ "lessons": [
19
+ { "title": "Bài 1: Kubernetes Architecture & kubeadm Cluster Setup", "slug": "01-kien-truc-cka-kubeadm" },
20
+ { "title": "Bài 2: Cluster Upgrade với kubeadm", "slug": "02-cluster-upgrade-kubeadm" },
21
+ { "title": "Bài 3: RBAC — Role-Based Access Control", "slug": "03-rbac-cka" }
22
+ ]
23
+ },
24
+ {
25
+ "name": "Domain 2: Workloads & Scheduling",
26
+ "weight": 15,
27
+ "lessons": [
28
+ { "title": "Bài 4: Deployments, DaemonSets & StatefulSets", "slug": "04-deployments-daemonsets-statefulsets" },
29
+ { "title": "Bài 5: Scheduling — Taints, Tolerations & Affinity", "slug": "05-scheduling-taints-affinity" }
30
+ ]
31
+ },
32
+ {
33
+ "name": "Domain 3: Services & Networking",
34
+ "weight": 20,
35
+ "lessons": [
36
+ { "title": "Bài 6: Services, Endpoints & CoreDNS", "slug": "06-services-endpoints-coredns" },
37
+ { "title": "Bài 7: Ingress, Network Policies & CNI", "slug": "07-ingress-networkpolicies-cni" }
38
+ ]
39
+ },
40
+ {
41
+ "name": "Domain 4: Storage",
42
+ "weight": 10,
43
+ "lessons": [
44
+ { "title": "Bài 8: Persistent Volumes, PVCs & StorageClass", "slug": "08-persistent-volumes-storageclass" }
45
+ ]
46
+ },
47
+ {
48
+ "name": "Domain 5: Troubleshooting",
49
+ "weight": 30,
50
+ "lessons": [
51
+ { "title": "Bài 9: etcd Backup & Restore", "slug": "09-etcd-backup-restore" },
52
+ { "title": "Bài 10: Troubleshooting Nodes & Cluster", "slug": "10-troubleshooting-nodes" },
53
+ { "title": "Bài 11: Troubleshooting Workloads", "slug": "11-troubleshooting-workloads" }
54
+ ]
55
+ }
56
+ ],
57
+ "questions": [
58
+ {
59
+ "id": 1,
60
+ "domain": "Domain 1: Cluster Architecture, Installation & Configuration",
61
+ "question": "You need to upgrade a Kubernetes cluster from v1.29 to v1.30 using kubeadm. What is the CORRECT order of operations?",
62
+ "options": [
63
+ "Upgrade worker nodes first, then upgrade control plane",
64
+ "Upgrade kubeadm on control plane, run kubeadm upgrade plan/apply, upgrade kubelet and kubectl, then upgrade worker nodes",
65
+ "Upgrade kubelet on all nodes simultaneously",
66
+ "Upgrade kubectl first, then kubeadm, then kubelet"
67
+ ],
68
+ "correct": 1,
69
+ "explanation": "The correct upgrade order is: upgrade kubeadm → run kubeadm upgrade plan → kubeadm upgrade apply on the control plane → upgrade kubelet and kubectl on control plane → then repeat for each worker node (drain, upgrade kubeadm/kubelet, uncordon)."
70
+ },
71
+ {
72
+ "id": 2,
73
+ "domain": "Domain 1: Cluster Architecture, Installation & Configuration",
74
+ "question": "Which kubectl command checks if a user has permission to create deployments in the 'production' namespace?",
75
+ "options": [
76
+ "kubectl get roles -n production",
77
+ "kubectl auth can-i create deployments -n production",
78
+ "kubectl describe clusterrole admin",
79
+ "kubectl check-access deployments -n production"
80
+ ],
81
+ "correct": 1,
82
+ "explanation": "kubectl auth can-i checks whether an action is allowed. 'kubectl auth can-i create deployments -n production' checks if the current user can create deployments in the production namespace."
83
+ },
84
+ {
85
+ "id": 3,
86
+ "domain": "Domain 1: Cluster Architecture, Installation & Configuration",
87
+ "question": "A RoleBinding binds a ClusterRole to a user in namespace 'dev'. What is the scope of the permissions?",
88
+ "options": [
89
+ "Cluster-wide, because ClusterRole is cluster-scoped",
90
+ "Limited to the 'dev' namespace only",
91
+ "All namespaces except 'dev'",
92
+ "It will result in an error — ClusterRole cannot be used with RoleBinding"
93
+ ],
94
+ "correct": 1,
95
+ "explanation": "When a RoleBinding references a ClusterRole, the permissions are scoped to the RoleBinding's namespace only. This is a common pattern to reuse a set of permissions defined in a ClusterRole across multiple namespaces."
96
+ },
97
+ {
98
+ "id": 4,
99
+ "domain": "Domain 1: Cluster Architecture, Installation & Configuration",
100
+ "question": "What is the Kubernetes version skew policy for kubelet relative to kube-apiserver?",
101
+ "options": [
102
+ "kubelet must be the exact same version as kube-apiserver",
103
+ "kubelet can be up to 3 minor versions older than kube-apiserver",
104
+ "kubelet can be up to 2 minor versions older than kube-apiserver, but not newer",
105
+ "There is no version requirement between kubelet and kube-apiserver"
106
+ ],
107
+ "correct": 1,
108
+ "explanation": "Since Kubernetes 1.28, kubelet may be up to three minor versions older than kube-apiserver but must not be newer. For example, kube-apiserver 1.30 supports kubelet 1.27, 1.28, 1.29, and 1.30. (Before 1.28 the limit was two minor versions.)"
109
+ },
110
+ {
111
+ "id": 5,
112
+ "domain": "Domain 2: Workloads & Scheduling",
113
+ "question": "A pod is in 'Pending' state. kubectl describe shows 'insufficient cpu'. What is the MOST likely cause?",
114
+ "options": [
115
+ "The container image is too large",
116
+ "No node has enough allocatable CPU to satisfy the pod's resource requests",
117
+ "The pod has a failed liveness probe",
118
+ "The pod's service account doesn't have sufficient permissions"
119
+ ],
120
+ "correct": 1,
121
+ "explanation": "When a pod is Pending with 'insufficient cpu', it means the scheduler cannot find a node with enough available CPU resources to satisfy the pod's resource requests. Solutions include adding nodes, reducing requests, or removing resource-heavy pods."
122
+ },
123
+ {
124
+ "id": 6,
125
+ "domain": "Domain 2: Workloads & Scheduling",
126
+ "question": "How do you ensure a pod is scheduled ONLY on nodes with the label 'disk=ssd'?",
127
+ "options": [
128
+ "Use a taint on the nodes with disk=ssd",
129
+ "Use nodeSelector with disk: ssd in the pod spec",
130
+ "Use a PodDisruptionBudget",
131
+ "Use podAntiAffinity"
132
+ ],
133
+ "correct": 1,
134
+ "explanation": "nodeSelector is the simplest way to constrain pods to nodes with specific labels. Adding 'nodeSelector: { disk: ssd }' to the pod spec ensures the pod is only scheduled on nodes with that label."
135
+ },
136
+ {
137
+ "id": 7,
138
+ "domain": "Domain 2: Workloads & Scheduling",
139
+ "question": "What is the effect of adding a taint 'key=value:NoSchedule' to a node?",
140
+ "options": [
141
+ "All existing pods without a matching toleration are evicted immediately",
142
+ "No new pods will be scheduled on the node unless they have a matching toleration",
143
+ "The node is removed from the cluster",
144
+ "All pods on the node are restarted"
145
+ ],
146
+ "correct": 1,
147
+ "explanation": "NoSchedule prevents new pods without a matching toleration from being scheduled on the node. Existing pods are NOT affected. Use NoExecute to also evict existing non-tolerating pods."
148
+ },
149
+ {
150
+ "id": 8,
151
+ "domain": "Domain 3: Services & Networking",
152
+ "question": "What is the DNS format for resolving a Service named 'my-svc' in namespace 'my-ns'?",
153
+ "options": [
154
+ "my-svc.my-ns",
155
+ "my-svc.my-ns.svc.cluster.local",
156
+ "my-ns.my-svc.cluster.local",
157
+ "svc.my-svc.my-ns.cluster.local"
158
+ ],
159
+ "correct": 1,
160
+ "explanation": "Kubernetes DNS follows the format: <service-name>.<namespace>.svc.cluster.local. Within the same namespace, you can use just the service name. Across namespaces, use <service-name>.<namespace>."
161
+ },
162
+ {
163
+ "id": 9,
164
+ "domain": "Domain 3: Services & Networking",
165
+ "question": "An Ingress resource routes traffic to a backend service. The Ingress controller pod is running but rules are not taking effect. What should you check FIRST?",
166
+ "options": [
167
+ "Whether the Ingress has the correct ingressClassName",
168
+ "Whether etcd is healthy",
169
+ "Whether the node has enough memory",
170
+ "Whether kube-scheduler is running"
171
+ ],
172
+ "correct": 0,
173
+ "explanation": "If Ingress rules are not working despite the controller running, the first check should be ingressClassName. In Kubernetes 1.22+, Ingress resources need to specify the correct IngressClass to be picked up by the right controller."
174
+ },
175
+ {
176
+ "id": 10,
177
+ "domain": "Domain 3: Services & Networking",
178
+ "question": "You created a NetworkPolicy with an empty podSelector {}. What is the effect?",
179
+ "options": [
180
+ "It selects no pods in the namespace",
181
+ "It selects all pods in the namespace",
182
+ "It causes a validation error",
183
+ "It applies to the entire cluster"
184
+ ],
185
+ "correct": 1,
186
+ "explanation": "An empty podSelector {} selects ALL pods in the namespace. A NetworkPolicy with spec.podSelector: {} and no ingress/egress rules effectively creates a default-deny policy for all pods in that namespace."
187
+ },
188
+ {
189
+ "id": 11,
190
+ "domain": "Domain 3: Services & Networking",
191
+ "question": "Which kube-proxy mode is MOST performant for large clusters with many Services?",
192
+ "options": [
193
+ "userspace mode",
194
+ "iptables mode",
195
+ "IPVS mode",
196
+ "nftables mode"
197
+ ],
198
+ "correct": 2,
199
+ "explanation": "IPVS (IP Virtual Server) mode uses hash tables for Service-to-Pod mapping, providing O(1) lookup performance regardless of the number of Services, making it much more efficient than iptables for large clusters."
200
+ },
201
+ {
202
+ "id": 12,
203
+ "domain": "Domain 4: Storage",
204
+ "question": "A PersistentVolume has reclaimPolicy set to 'Retain'. What happens when the associated PVC is deleted?",
205
+ "options": [
206
+ "The PV and its data are automatically deleted",
207
+ "The PV remains with its data intact but it becomes 'Released' and cannot be bound to a new PVC without manual intervention",
208
+ "The PV is immediately available for a new PVC to claim",
209
+ "The data is archived to an object store"
210
+ ],
211
+ "correct": 1,
212
+ "explanation": "With Retain policy, when the PVC is deleted, the PV enters 'Released' state. The data is preserved, but the PV cannot be rebound automatically. An admin must manually clean up and reconfigure the PV for reuse."
213
+ },
214
+ {
215
+ "id": 13,
216
+ "domain": "Domain 4: Storage",
217
+ "question": "Which access mode allows a PersistentVolume to be mounted as read-write by multiple nodes simultaneously?",
218
+ "options": [
219
+ "ReadWriteOnce (RWO)",
220
+ "ReadOnlyMany (ROX)",
221
+ "ReadWriteMany (RWX)",
222
+ "ReadWriteOncePod (RWOP)"
223
+ ],
224
+ "correct": 2,
225
+ "explanation": "ReadWriteMany (RWX) allows the volume to be mounted as read-write by many nodes simultaneously. This is commonly supported by network file systems like NFS, CephFS, and cloud-native shared file storage."
226
+ },
227
+ {
228
+ "id": 14,
229
+ "domain": "Domain 5: Troubleshooting",
230
+ "question": "You need to take a snapshot of etcd for backup. Which tool and flags are required?",
231
+ "options": [
232
+ "kubectl backup etcd --output=/backup/snapshot.db",
233
+ "etcdctl snapshot save /backup/snapshot.db --endpoints --cacert --cert --key",
234
+ "kubeadm backup etcd --snapshot-dir=/backup",
235
+ "etcdctl backup --data-dir=/var/lib/etcd"
236
+ ],
237
+ "correct": 1,
238
+ "explanation": "etcdctl snapshot save is the correct command. It requires TLS flags: --endpoints (etcd address), --cacert (CA certificate), --cert (server certificate), and --key (server key), all found in the etcd pod manifest."
239
+ },
240
+ {
241
+ "id": 15,
242
+ "domain": "Domain 5: Troubleshooting",
243
+ "question": "A node shows 'NotReady' status. Which is the FIRST thing to check?",
244
+ "options": [
245
+ "Check if kube-proxy is running",
246
+ "Check if kubelet is running on the node (systemctl status kubelet)",
247
+ "Check if CoreDNS pods are healthy",
248
+ "Check if the container images are cached"
249
+ ],
250
+ "correct": 1,
251
+ "explanation": "A NotReady node typically means kubelet is not communicating with the API server. The first step is to SSH into the node and check kubelet status with 'systemctl status kubelet', then check logs with 'journalctl -u kubelet'."
252
+ },
253
+ {
254
+ "id": 16,
255
+ "domain": "Domain 5: Troubleshooting",
256
+ "question": "A pod is in CrashLoopBackOff state. Which command is MOST useful to diagnose the issue?",
257
+ "options": [
258
+ "kubectl get events",
259
+ "kubectl logs <pod-name> --previous",
260
+ "kubectl describe node",
261
+ "kubectl top pod"
262
+ ],
263
+ "correct": 1,
264
+ "explanation": "kubectl logs <pod-name> --previous shows logs from the previous crashed container instance. This reveals the actual error (e.g., application error, missing config) that caused the crash."
265
+ },
266
+ {
267
+ "id": 17,
268
+ "domain": "Domain 5: Troubleshooting",
269
+ "question": "A Deployment's pods cannot reach a ClusterIP Service. The Service has endpoints. What should you check?",
270
+ "options": [
271
+ "Whether the Service has the correct selector matching the pod labels",
272
+ "Whether there's a NetworkPolicy blocking traffic between the source pods and the Service",
273
+ "Whether the Deployment has enough replicas",
274
+ "Whether the node has sufficient disk space"
275
+ ],
276
+ "correct": 1,
277
+ "explanation": "If the Service has endpoints (backends are matched), but pods can't reach it, a NetworkPolicy may be blocking traffic. Check for NetworkPolicies in both the source and destination namespaces that could restrict ingress or egress."
278
+ },
279
+ {
280
+ "id": 18,
281
+ "domain": "Domain 5: Troubleshooting",
282
+ "question": "After restoring etcd from a snapshot, the API server shows stale data. What is the MOST likely cause?",
283
+ "options": [
284
+ "The snapshot file was corrupted",
285
+ "The etcd data-dir was not updated to point to the restored snapshot location",
286
+ "kube-apiserver needs a version upgrade",
287
+ "CoreDNS needs to be restarted"
288
+ ],
289
+ "correct": 1,
290
+ "explanation": "After restoring, you must update the etcd pod manifest (or systemd unit) to point --data-dir to the new restored directory. If the old data-dir is still being used, etcd will serve stale data."
291
+ },
292
+ {
293
+ "id": 19,
294
+ "domain": "Domain 5: Troubleshooting",
295
+ "question": "A pod is stuck in 'ImagePullBackOff' state. Which is NOT a possible cause?",
296
+ "options": [
297
+ "The image name or tag is incorrect",
298
+ "The container registry requires authentication and no imagePullSecret is configured",
299
+ "The node has insufficient CPU resources",
300
+ "Network connectivity to the registry is blocked"
301
+ ],
302
+ "correct": 2,
303
+ "explanation": "ImagePullBackOff indicates the kubelet cannot pull the container image. This can be due to wrong image name, missing registry credentials, or network issues — but NOT CPU resources. CPU issues cause Pending state, not ImagePullBackOff."
304
+ },
305
+ {
306
+ "id": 20,
307
+ "domain": "Domain 5: Troubleshooting",
308
+ "question": "How would you check which static pod manifests are being used on a node?",
309
+ "options": [
310
+ "kubectl get staticpods",
311
+ "Check the directory specified by kubelet's --pod-manifest-path flag (default: /etc/kubernetes/manifests/)",
312
+ "kubectl describe node | grep StaticPods",
313
+ "etcdctl get /registry/pods"
314
+ ],
315
+ "correct": 1,
316
+ "explanation": "Static pods are defined by manifest files in the directory configured via --pod-manifest-path (default /etc/kubernetes/manifests/). kubelet watches this directory and automatically creates/deletes pods based on these manifests."
317
+ }
318
+ ]
319
+ }