@sonde/packs 0.1.0 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +6 -0
- package/.turbo/turbo-test.log +814 -0
- package/.turbo/turbo-typecheck.log +4 -0
- package/CHANGELOG.md +10 -0
- package/dist/index.d.ts +16 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +40 -2
- package/dist/index.js.map +1 -1
- package/dist/integrations/citrix.d.ts +13 -0
- package/dist/integrations/citrix.d.ts.map +1 -0
- package/dist/integrations/citrix.js +420 -0
- package/dist/integrations/citrix.js.map +1 -0
- package/dist/integrations/citrix.test.d.ts +2 -0
- package/dist/integrations/citrix.test.d.ts.map +1 -0
- package/dist/integrations/citrix.test.js +464 -0
- package/dist/integrations/citrix.test.js.map +1 -0
- package/dist/integrations/graph.d.ts +9 -0
- package/dist/integrations/graph.d.ts.map +1 -0
- package/dist/integrations/graph.js +290 -0
- package/dist/integrations/graph.js.map +1 -0
- package/dist/integrations/graph.test.d.ts +2 -0
- package/dist/integrations/graph.test.d.ts.map +1 -0
- package/dist/integrations/graph.test.js +356 -0
- package/dist/integrations/graph.test.js.map +1 -0
- package/dist/integrations/httpbin.d.ts +3 -0
- package/dist/integrations/httpbin.d.ts.map +1 -0
- package/dist/integrations/httpbin.js +70 -0
- package/dist/integrations/httpbin.js.map +1 -0
- package/dist/integrations/nutanix.d.ts +18 -0
- package/dist/integrations/nutanix.d.ts.map +1 -0
- package/dist/integrations/nutanix.js +1121 -0
- package/dist/integrations/nutanix.js.map +1 -0
- package/dist/integrations/nutanix.test.d.ts +2 -0
- package/dist/integrations/nutanix.test.d.ts.map +1 -0
- package/dist/integrations/nutanix.test.js +978 -0
- package/dist/integrations/nutanix.test.js.map +1 -0
- package/dist/integrations/proxmox.d.ts +12 -0
- package/dist/integrations/proxmox.d.ts.map +1 -0
- package/dist/integrations/proxmox.js +733 -0
- package/dist/integrations/proxmox.js.map +1 -0
- package/dist/integrations/proxmox.test.d.ts +2 -0
- package/dist/integrations/proxmox.test.d.ts.map +1 -0
- package/dist/integrations/proxmox.test.js +697 -0
- package/dist/integrations/proxmox.test.js.map +1 -0
- package/dist/integrations/servicenow.d.ts +3 -0
- package/dist/integrations/servicenow.d.ts.map +1 -0
- package/dist/integrations/servicenow.js +257 -0
- package/dist/integrations/servicenow.js.map +1 -0
- package/dist/integrations/servicenow.test.d.ts +2 -0
- package/dist/integrations/servicenow.test.d.ts.map +1 -0
- package/dist/integrations/servicenow.test.js +217 -0
- package/dist/integrations/servicenow.test.js.map +1 -0
- package/dist/integrations/splunk.d.ts +9 -0
- package/dist/integrations/splunk.d.ts.map +1 -0
- package/dist/integrations/splunk.js +242 -0
- package/dist/integrations/splunk.js.map +1 -0
- package/dist/integrations/splunk.test.d.ts +2 -0
- package/dist/integrations/splunk.test.d.ts.map +1 -0
- package/dist/integrations/splunk.test.js +323 -0
- package/dist/integrations/splunk.test.js.map +1 -0
- package/dist/mysql/index.d.ts +3 -0
- package/dist/mysql/index.d.ts.map +1 -0
- package/dist/mysql/index.js +13 -0
- package/dist/mysql/index.js.map +1 -0
- package/dist/mysql/manifest.d.ts +3 -0
- package/dist/mysql/manifest.d.ts.map +1 -0
- package/dist/mysql/manifest.js +69 -0
- package/dist/mysql/manifest.js.map +1 -0
- package/dist/mysql/probes/databases-list.d.ts +13 -0
- package/dist/mysql/probes/databases-list.d.ts.map +1 -0
- package/dist/mysql/probes/databases-list.js +31 -0
- package/dist/mysql/probes/databases-list.js.map +1 -0
- package/dist/mysql/probes/databases-list.test.d.ts +2 -0
- package/dist/mysql/probes/databases-list.test.d.ts.map +1 -0
- package/dist/mysql/probes/databases-list.test.js +54 -0
- package/dist/mysql/probes/databases-list.test.js.map +1 -0
- package/dist/mysql/probes/processlist.d.ts +18 -0
- package/dist/mysql/probes/processlist.d.ts.map +1 -0
- package/dist/mysql/probes/processlist.js +36 -0
- package/dist/mysql/probes/processlist.js.map +1 -0
- package/dist/mysql/probes/processlist.test.d.ts +2 -0
- package/dist/mysql/probes/processlist.test.d.ts.map +1 -0
- package/dist/mysql/probes/processlist.test.js +41 -0
- package/dist/mysql/probes/processlist.test.js.map +1 -0
- package/dist/mysql/probes/status.d.ts +14 -0
- package/dist/mysql/probes/status.d.ts.map +1 -0
- package/dist/mysql/probes/status.js +40 -0
- package/dist/mysql/probes/status.js.map +1 -0
- package/dist/mysql/probes/status.test.d.ts +2 -0
- package/dist/mysql/probes/status.test.d.ts.map +1 -0
- package/dist/mysql/probes/status.test.js +43 -0
- package/dist/mysql/probes/status.test.js.map +1 -0
- package/dist/nginx/index.d.ts +3 -0
- package/dist/nginx/index.d.ts.map +1 -0
- package/dist/nginx/index.js +13 -0
- package/dist/nginx/index.js.map +1 -0
- package/dist/nginx/manifest.d.ts +3 -0
- package/dist/nginx/manifest.d.ts.map +1 -0
- package/dist/nginx/manifest.js +68 -0
- package/dist/nginx/manifest.js.map +1 -0
- package/dist/nginx/probes/access-log-tail.d.ts +9 -0
- package/dist/nginx/probes/access-log-tail.d.ts.map +1 -0
- package/dist/nginx/probes/access-log-tail.js +14 -0
- package/dist/nginx/probes/access-log-tail.js.map +1 -0
- package/dist/nginx/probes/access-log-tail.test.d.ts +2 -0
- package/dist/nginx/probes/access-log-tail.test.d.ts.map +1 -0
- package/dist/nginx/probes/access-log-tail.test.js +40 -0
- package/dist/nginx/probes/access-log-tail.test.js.map +1 -0
- package/dist/nginx/probes/config-test.d.ts +8 -0
- package/dist/nginx/probes/config-test.d.ts.map +1 -0
- package/dist/nginx/probes/config-test.js +18 -0
- package/dist/nginx/probes/config-test.js.map +1 -0
- package/dist/nginx/probes/config-test.test.d.ts +2 -0
- package/dist/nginx/probes/config-test.test.d.ts.map +1 -0
- package/dist/nginx/probes/config-test.test.js +35 -0
- package/dist/nginx/probes/config-test.test.js.map +1 -0
- package/dist/nginx/probes/error-log-tail.d.ts +9 -0
- package/dist/nginx/probes/error-log-tail.d.ts.map +1 -0
- package/dist/nginx/probes/error-log-tail.js +14 -0
- package/dist/nginx/probes/error-log-tail.js.map +1 -0
- package/dist/nginx/probes/error-log-tail.test.d.ts +2 -0
- package/dist/nginx/probes/error-log-tail.test.d.ts.map +1 -0
- package/dist/nginx/probes/error-log-tail.test.js +34 -0
- package/dist/nginx/probes/error-log-tail.test.js.map +1 -0
- package/dist/postgres/index.d.ts +3 -0
- package/dist/postgres/index.d.ts.map +1 -0
- package/dist/postgres/index.js +13 -0
- package/dist/postgres/index.js.map +1 -0
- package/dist/postgres/manifest.d.ts +3 -0
- package/dist/postgres/manifest.d.ts.map +1 -0
- package/dist/postgres/manifest.js +90 -0
- package/dist/postgres/manifest.js.map +1 -0
- package/dist/postgres/probes/connections-active.d.ts +17 -0
- package/dist/postgres/probes/connections-active.d.ts.map +1 -0
- package/dist/postgres/probes/connections-active.js +37 -0
- package/dist/postgres/probes/connections-active.js.map +1 -0
- package/dist/postgres/probes/connections-active.test.d.ts +2 -0
- package/dist/postgres/probes/connections-active.test.d.ts.map +1 -0
- package/dist/postgres/probes/connections-active.test.js +36 -0
- package/dist/postgres/probes/connections-active.test.js.map +1 -0
- package/dist/postgres/probes/databases-list.d.ts +14 -0
- package/dist/postgres/probes/databases-list.d.ts.map +1 -0
- package/dist/postgres/probes/databases-list.js +34 -0
- package/dist/postgres/probes/databases-list.js.map +1 -0
- package/dist/postgres/probes/databases-list.test.d.ts +2 -0
- package/dist/postgres/probes/databases-list.test.d.ts.map +1 -0
- package/dist/postgres/probes/databases-list.test.js +49 -0
- package/dist/postgres/probes/databases-list.test.js.map +1 -0
- package/dist/postgres/probes/query-slow.d.ts +17 -0
- package/dist/postgres/probes/query-slow.d.ts.map +1 -0
- package/dist/postgres/probes/query-slow.js +37 -0
- package/dist/postgres/probes/query-slow.js.map +1 -0
- package/dist/postgres/probes/query-slow.test.d.ts +2 -0
- package/dist/postgres/probes/query-slow.test.d.ts.map +1 -0
- package/dist/postgres/probes/query-slow.test.js +30 -0
- package/dist/postgres/probes/query-slow.test.js.map +1 -0
- package/dist/proxmox/index.d.ts +3 -0
- package/dist/proxmox/index.d.ts.map +1 -0
- package/dist/proxmox/index.js +23 -0
- package/dist/proxmox/index.js.map +1 -0
- package/dist/proxmox/manifest.d.ts +3 -0
- package/dist/proxmox/manifest.d.ts.map +1 -0
- package/dist/proxmox/manifest.js +75 -0
- package/dist/proxmox/manifest.js.map +1 -0
- package/dist/proxmox/probes/ceph-status.d.ts +36 -0
- package/dist/proxmox/probes/ceph-status.d.ts.map +1 -0
- package/dist/proxmox/probes/ceph-status.js +71 -0
- package/dist/proxmox/probes/ceph-status.js.map +1 -0
- package/dist/proxmox/probes/ceph-status.test.d.ts +2 -0
- package/dist/proxmox/probes/ceph-status.test.d.ts.map +1 -0
- package/dist/proxmox/probes/ceph-status.test.js +115 -0
- package/dist/proxmox/probes/ceph-status.test.js.map +1 -0
- package/dist/proxmox/probes/cluster-config.d.ts +31 -0
- package/dist/proxmox/probes/cluster-config.d.ts.map +1 -0
- package/dist/proxmox/probes/cluster-config.js +72 -0
- package/dist/proxmox/probes/cluster-config.js.map +1 -0
- package/dist/proxmox/probes/cluster-config.test.d.ts +2 -0
- package/dist/proxmox/probes/cluster-config.test.d.ts.map +1 -0
- package/dist/proxmox/probes/cluster-config.test.js +107 -0
- package/dist/proxmox/probes/cluster-config.test.js.map +1 -0
- package/dist/proxmox/probes/ha-status.d.ts +18 -0
- package/dist/proxmox/probes/ha-status.d.ts.map +1 -0
- package/dist/proxmox/probes/ha-status.js +38 -0
- package/dist/proxmox/probes/ha-status.js.map +1 -0
- package/dist/proxmox/probes/ha-status.test.d.ts +2 -0
- package/dist/proxmox/probes/ha-status.test.d.ts.map +1 -0
- package/dist/proxmox/probes/ha-status.test.js +66 -0
- package/dist/proxmox/probes/ha-status.test.js.map +1 -0
- package/dist/proxmox/probes/lvm.d.ts +35 -0
- package/dist/proxmox/probes/lvm.d.ts.map +1 -0
- package/dist/proxmox/probes/lvm.js +75 -0
- package/dist/proxmox/probes/lvm.js.map +1 -0
- package/dist/proxmox/probes/lvm.test.d.ts +2 -0
- package/dist/proxmox/probes/lvm.test.d.ts.map +1 -0
- package/dist/proxmox/probes/lvm.test.js +128 -0
- package/dist/proxmox/probes/lvm.test.js.map +1 -0
- package/dist/proxmox/probes/lxc-config.d.ts +29 -0
- package/dist/proxmox/probes/lxc-config.d.ts.map +1 -0
- package/dist/proxmox/probes/lxc-config.js +67 -0
- package/dist/proxmox/probes/lxc-config.js.map +1 -0
- package/dist/proxmox/probes/lxc-config.test.d.ts +2 -0
- package/dist/proxmox/probes/lxc-config.test.d.ts.map +1 -0
- package/dist/proxmox/probes/lxc-config.test.js +77 -0
- package/dist/proxmox/probes/lxc-config.test.js.map +1 -0
- package/dist/proxmox/probes/lxc-list.d.ts +20 -0
- package/dist/proxmox/probes/lxc-list.d.ts.map +1 -0
- package/dist/proxmox/probes/lxc-list.js +49 -0
- package/dist/proxmox/probes/lxc-list.js.map +1 -0
- package/dist/proxmox/probes/lxc-list.test.d.ts +2 -0
- package/dist/proxmox/probes/lxc-list.test.d.ts.map +1 -0
- package/dist/proxmox/probes/lxc-list.test.js +51 -0
- package/dist/proxmox/probes/lxc-list.test.js.map +1 -0
- package/dist/proxmox/probes/vm-config.d.ts +21 -0
- package/dist/proxmox/probes/vm-config.d.ts.map +1 -0
- package/dist/proxmox/probes/vm-config.js +58 -0
- package/dist/proxmox/probes/vm-config.js.map +1 -0
- package/dist/proxmox/probes/vm-config.test.d.ts +2 -0
- package/dist/proxmox/probes/vm-config.test.d.ts.map +1 -0
- package/dist/proxmox/probes/vm-config.test.js +80 -0
- package/dist/proxmox/probes/vm-config.test.js.map +1 -0
- package/dist/proxmox/probes/vm-locks.d.ts +16 -0
- package/dist/proxmox/probes/vm-locks.d.ts.map +1 -0
- package/dist/proxmox/probes/vm-locks.js +35 -0
- package/dist/proxmox/probes/vm-locks.js.map +1 -0
- package/dist/proxmox/probes/vm-locks.test.d.ts +2 -0
- package/dist/proxmox/probes/vm-locks.test.d.ts.map +1 -0
- package/dist/proxmox/probes/vm-locks.test.js +54 -0
- package/dist/proxmox/probes/vm-locks.test.js.map +1 -0
- package/dist/redis/index.d.ts +3 -0
- package/dist/redis/index.d.ts.map +1 -0
- package/dist/redis/index.js +13 -0
- package/dist/redis/index.js.map +1 -0
- package/dist/redis/manifest.d.ts +3 -0
- package/dist/redis/manifest.d.ts.map +1 -0
- package/dist/redis/manifest.js +51 -0
- package/dist/redis/manifest.js.map +1 -0
- package/dist/redis/probes/info.d.ts +15 -0
- package/dist/redis/probes/info.d.ts.map +1 -0
- package/dist/redis/probes/info.js +32 -0
- package/dist/redis/probes/info.js.map +1 -0
- package/dist/redis/probes/info.test.d.ts +2 -0
- package/dist/redis/probes/info.test.d.ts.map +1 -0
- package/dist/redis/probes/info.test.js +64 -0
- package/dist/redis/probes/info.test.js.map +1 -0
- package/dist/redis/probes/keys-count.d.ts +13 -0
- package/dist/redis/probes/keys-count.d.ts.map +1 -0
- package/dist/redis/probes/keys-count.js +24 -0
- package/dist/redis/probes/keys-count.js.map +1 -0
- package/dist/redis/probes/keys-count.test.d.ts +2 -0
- package/dist/redis/probes/keys-count.test.d.ts.map +1 -0
- package/dist/redis/probes/keys-count.test.js +37 -0
- package/dist/redis/probes/keys-count.test.js.map +1 -0
- package/dist/redis/probes/memory-usage.d.ts +16 -0
- package/dist/redis/probes/memory-usage.d.ts.map +1 -0
- package/dist/redis/probes/memory-usage.js +31 -0
- package/dist/redis/probes/memory-usage.js.map +1 -0
- package/dist/redis/probes/memory-usage.test.d.ts +2 -0
- package/dist/redis/probes/memory-usage.test.d.ts.map +1 -0
- package/dist/redis/probes/memory-usage.test.js +48 -0
- package/dist/redis/probes/memory-usage.test.js.map +1 -0
- package/dist/runbooks/nutanix.d.ts +3 -0
- package/dist/runbooks/nutanix.d.ts.map +1 -0
- package/dist/runbooks/nutanix.js +619 -0
- package/dist/runbooks/nutanix.js.map +1 -0
- package/dist/runbooks/nutanix.test.d.ts +2 -0
- package/dist/runbooks/nutanix.test.d.ts.map +1 -0
- package/dist/runbooks/nutanix.test.js +971 -0
- package/dist/runbooks/nutanix.test.js.map +1 -0
- package/dist/runbooks/proxmox.d.ts +3 -0
- package/dist/runbooks/proxmox.d.ts.map +1 -0
- package/dist/runbooks/proxmox.js +451 -0
- package/dist/runbooks/proxmox.js.map +1 -0
- package/dist/runbooks/proxmox.test.d.ts +2 -0
- package/dist/runbooks/proxmox.test.d.ts.map +1 -0
- package/dist/runbooks/proxmox.test.js +700 -0
- package/dist/runbooks/proxmox.test.js.map +1 -0
- package/dist/signatures.d.ts +2 -0
- package/dist/signatures.d.ts.map +1 -0
- package/dist/signatures.js +2 -0
- package/dist/signatures.js.map +1 -0
- package/dist/types.d.ts +53 -0
- package/dist/types.d.ts.map +1 -1
- package/dist/validation.d.ts +6 -1
- package/dist/validation.d.ts.map +1 -1
- package/dist/validation.js +10 -1
- package/dist/validation.js.map +1 -1
- package/package.json +1 -1
- package/src/index.ts +60 -6
- package/src/integrations/citrix.test.ts +592 -0
- package/src/integrations/citrix.ts +557 -0
- package/src/integrations/graph.test.ts +478 -0
- package/src/integrations/graph.ts +413 -0
- package/src/integrations/httpbin.ts +72 -0
- package/src/integrations/nutanix.test.ts +1508 -0
- package/src/integrations/nutanix.ts +1460 -0
- package/src/integrations/proxmox.test.ts +1020 -0
- package/src/integrations/proxmox.ts +989 -0
- package/src/integrations/servicenow.test.ts +314 -0
- package/src/integrations/servicenow.ts +285 -0
- package/src/integrations/splunk.test.ts +440 -0
- package/src/integrations/splunk.ts +356 -0
- package/src/mysql/index.ts +14 -0
- package/src/mysql/manifest.ts +70 -0
- package/src/mysql/probes/databases-list.test.ts +62 -0
- package/src/mysql/probes/databases-list.ts +45 -0
- package/src/mysql/probes/processlist.test.ts +47 -0
- package/src/mysql/probes/processlist.ts +55 -0
- package/src/mysql/probes/status.test.ts +50 -0
- package/src/mysql/probes/status.ts +56 -0
- package/src/nginx/index.ts +14 -0
- package/src/nginx/manifest.ts +69 -0
- package/src/nginx/probes/access-log-tail.test.ts +51 -0
- package/src/nginx/probes/access-log-tail.ts +23 -0
- package/src/nginx/probes/config-test.test.ts +47 -0
- package/src/nginx/probes/config-test.ts +24 -0
- package/src/nginx/probes/error-log-tail.test.ts +44 -0
- package/src/nginx/probes/error-log-tail.ts +23 -0
- package/src/postgres/index.ts +14 -0
- package/src/postgres/manifest.ts +91 -0
- package/src/postgres/probes/connections-active.test.ts +42 -0
- package/src/postgres/probes/connections-active.ts +55 -0
- package/src/postgres/probes/databases-list.test.ts +57 -0
- package/src/postgres/probes/databases-list.ts +49 -0
- package/src/postgres/probes/query-slow.test.ts +37 -0
- package/src/postgres/probes/query-slow.ts +55 -0
- package/src/proxmox/index.ts +24 -0
- package/src/proxmox/manifest.ts +76 -0
- package/src/proxmox/probes/ceph-status.test.ts +126 -0
- package/src/proxmox/probes/ceph-status.ts +116 -0
- package/src/proxmox/probes/cluster-config.test.ts +118 -0
- package/src/proxmox/probes/cluster-config.ts +97 -0
- package/src/proxmox/probes/ha-status.test.ts +76 -0
- package/src/proxmox/probes/ha-status.ts +56 -0
- package/src/proxmox/probes/lvm.test.ts +140 -0
- package/src/proxmox/probes/lvm.ts +121 -0
- package/src/proxmox/probes/lxc-config.test.ts +89 -0
- package/src/proxmox/probes/lxc-config.ts +90 -0
- package/src/proxmox/probes/lxc-list.test.ts +60 -0
- package/src/proxmox/probes/lxc-list.ts +67 -0
- package/src/proxmox/probes/vm-config.test.ts +93 -0
- package/src/proxmox/probes/vm-config.ts +77 -0
- package/src/proxmox/probes/vm-locks.test.ts +63 -0
- package/src/proxmox/probes/vm-locks.ts +49 -0
- package/src/redis/index.ts +14 -0
- package/src/redis/manifest.ts +52 -0
- package/src/redis/probes/info.test.ts +73 -0
- package/src/redis/probes/info.ts +46 -0
- package/src/redis/probes/keys-count.test.ts +44 -0
- package/src/redis/probes/keys-count.ts +38 -0
- package/src/redis/probes/memory-usage.test.ts +54 -0
- package/src/redis/probes/memory-usage.ts +46 -0
- package/src/runbooks/nutanix.test.ts +1138 -0
- package/src/runbooks/nutanix.ts +941 -0
- package/src/runbooks/proxmox.test.ts +838 -0
- package/src/runbooks/proxmox.ts +626 -0
- package/src/signatures.ts +1 -0
- package/src/types.ts +62 -0
- package/src/validation.ts +21 -1
- package/tsconfig.tsbuildinfo +1 -0
|
@@ -0,0 +1,941 @@
|
|
|
1
|
+
import type {
|
|
2
|
+
DiagnosticFinding,
|
|
3
|
+
DiagnosticRunbookDefinition,
|
|
4
|
+
DiagnosticRunbookResult,
|
|
5
|
+
RunProbe,
|
|
6
|
+
RunbookContext,
|
|
7
|
+
RunbookProbeResult,
|
|
8
|
+
} from '../types.js';
|
|
9
|
+
|
|
10
|
+
// --- Helpers ---
|
|
11
|
+
|
|
12
|
+
function buildResult(
|
|
13
|
+
category: string,
|
|
14
|
+
findings: DiagnosticFinding[],
|
|
15
|
+
probeResults: Record<string, RunbookProbeResult>,
|
|
16
|
+
startTime: number,
|
|
17
|
+
summaryText: string,
|
|
18
|
+
): DiagnosticRunbookResult {
|
|
19
|
+
const counts = { info: 0, warning: 0, critical: 0 };
|
|
20
|
+
for (const f of findings) counts[f.severity]++;
|
|
21
|
+
|
|
22
|
+
const results = Object.values(probeResults);
|
|
23
|
+
return {
|
|
24
|
+
category,
|
|
25
|
+
findings,
|
|
26
|
+
probeResults,
|
|
27
|
+
summary: {
|
|
28
|
+
probesRun: results.length,
|
|
29
|
+
probesSucceeded: results.filter((r) => r.status === 'success').length,
|
|
30
|
+
probesFailed: results.filter((r) => r.status !== 'success').length,
|
|
31
|
+
findingsCount: counts,
|
|
32
|
+
durationMs: Date.now() - startTime,
|
|
33
|
+
summaryText,
|
|
34
|
+
},
|
|
35
|
+
};
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
function storeResult(
|
|
39
|
+
map: Record<string, RunbookProbeResult>,
|
|
40
|
+
result: RunbookProbeResult,
|
|
41
|
+
): RunbookProbeResult {
|
|
42
|
+
map[result.probe] = result;
|
|
43
|
+
return result;
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
function probeData<T>(result: RunbookProbeResult): T | undefined {
|
|
47
|
+
if (result.status !== 'success') return undefined;
|
|
48
|
+
return result.data as T;
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
// --- Nutanix data shapes ---
|
|
52
|
+
|
|
53
|
+
// Subset of a Nutanix cluster entity as returned by the
// 'nutanix.clusters.list' probe — only the fields this runbook reads.
interface ClusterInfo {
  name?: string | null;
  // External identifier; used as a fallback label when name is missing.
  extId?: string | null;
  // Reported in degraded-cluster findings; healthy clusters are expected
  // to be in NORMAL mode (per the finding text below).
  operationMode?: string | null;
  // When true, the cluster-health runbook raises a critical finding.
  isDegraded?: boolean;
  numNodes?: number | null;
}
|
|
60
|
+
|
|
61
|
+
// Subset of a Nutanix host entity as returned by the
// 'nutanix.hosts.list' probe — only the fields this runbook reads.
interface HostInfo {
  name?: string | null;
  // External identifier; used as a fallback label when name is missing.
  extId?: string | null;
  // Hosts with this flag set are reported in a maintenance-mode warning.
  maintenanceMode?: boolean;
}
|
|
66
|
+
|
|
67
|
+
// Subset of a Nutanix VM entity as returned by the 'nutanix.vms.list'
// probe (used by the VM-health runbook to resolve vm_name to an extId).
interface VmInfo {
  name?: string | null;
  // External identifier; the VM-health runbook uses this as vm_id.
  extId?: string | null;
  powerState?: string | null;
  numSockets?: number | null;
  numCoresPerSocket?: number | null;
  memorySizeMb?: number | null;
  clusterExtId?: string | null;
  hostExtId?: string | null;
}
|
|
77
|
+
|
|
78
|
+
// Shape of a Nutanix alert entry — presumably from the alert probes
// ('nutanix.alerts.summary' / 'nutanix.alerts.list'); this interface is
// not referenced in the visible portion of the file — TODO confirm usage.
interface AlertInfo {
  title?: string | null;
  severity?: string | null;
  sourceEntity?: { type?: string | null; name?: string | null; extId?: string | null };
  creationTime?: string | null;
  possibleCauses?: unknown;
  resolutionSteps?: unknown;
}
|
|
86
|
+
|
|
87
|
+
// Subset of a storage container as returned by the
// 'nutanix.storage.containers' probe — only the fields this runbook reads.
interface ContainerInfo {
  name?: string | null;
  // Percent of capacity used; shown in the high-usage finding title.
  usedPct?: number;
  // Pre-computed by the probe; triggers a capacity warning when true
  // (the section comment suggests the threshold is >85% — set upstream).
  highUsage?: boolean;
  maxCapacityBytes?: number;
  usedBytes?: number;
  availableBytes?: number;
}
|
|
95
|
+
|
|
96
|
+
// Subset of a Nutanix task as returned by the 'nutanix.tasks.recent'
// probe — only the fields this runbook reads.
interface TaskInfo {
  type?: string | null;
  status?: string | null;
  // Pre-computed by the probe; failed tasks are rolled into a warning.
  isFailed?: boolean;
  // Pre-computed by the probe; the finding title describes these as
  // tasks running >1 hour.
  isLongRunning?: boolean;
  startTime?: string | null;
  // Preferred over status when describing failed tasks.
  errorMessage?: string | null;
}
|
|
104
|
+
|
|
105
|
+
// Shape of a VM snapshot entry — presumably from the
// 'nutanix.vm.snapshots' probe used by the VM-health runbook; not
// referenced in the visible portion of the file — TODO confirm usage.
interface SnapshotInfo {
  name?: string | null;
  ageDays?: number | null;
  isOld?: boolean;
  isExpired?: boolean;
}
|
|
111
|
+
|
|
112
|
+
// --- 1. Cluster Health Runbook ---
|
|
113
|
+
|
|
114
|
+
/**
 * Fleet-wide Nutanix cluster health runbook.
 *
 * Runs six probes in parallel (clusters, alert summary, storage
 * containers, recent tasks, LCM lifecycle status, hosts), then analyzes
 * the results into findings:
 *  - critical: degraded clusters, unresolved critical alerts
 *  - warning:  hosts in maintenance, high-usage storage containers,
 *              failed tasks (last 24h), long-running tasks
 *  - info:     available LCM component updates; or a single "healthy"
 *              finding when nothing else was flagged
 *
 * @param _params - Unused; this runbook takes no parameters.
 * @param runProbe - Callback used to execute individual probes.
 * @param _context - Unused runbook context.
 * @returns Aggregated findings, raw probe results, and a run summary.
 */
async function clusterHealthHandler(
  _params: Record<string, unknown>,
  runProbe: RunProbe,
  _context: RunbookContext,
): Promise<DiagnosticRunbookResult> {
  const startTime = Date.now();
  const findings: DiagnosticFinding[] = [];
  const probeResults: Record<string, RunbookProbeResult> = {};

  // Group 1: parallel fleet-wide probes
  // Each result is stored in probeResults as it settles, and also bound
  // to a named variable for the analysis below.
  const [clustersR, alertsR, storageR, tasksR, lifecycleR, hostsR] = await Promise.all([
    runProbe('nutanix.clusters.list').then((r) => storeResult(probeResults, r)),
    runProbe('nutanix.alerts.summary').then((r) => storeResult(probeResults, r)),
    runProbe('nutanix.storage.containers').then((r) => storeResult(probeResults, r)),
    runProbe('nutanix.tasks.recent', { hours: 24 }).then((r) => storeResult(probeResults, r)),
    runProbe('nutanix.lifecycle.status').then((r) => storeResult(probeResults, r)),
    runProbe('nutanix.hosts.list').then((r) => storeResult(probeResults, r)),
  ]);

  // --- Analysis ---
  // Note: a failed probe yields probeData() === undefined, so each
  // section silently analyzes an empty list rather than erroring out.

  // Degraded clusters
  const clustersData = probeData<{ clusters?: ClusterInfo[]; totalCount?: number }>(clustersR);
  const clusters = clustersData?.clusters ?? [];
  for (const c of clusters) {
    if (c.isDegraded) {
      findings.push({
        severity: 'critical',
        title: `Cluster ${c.name ?? c.extId} is degraded`,
        detail: `Cluster "${c.name}" is in ${c.operationMode} mode (expected NORMAL)`,
        remediation: 'Check Prism Central for cluster-level alerts and node status',
        relatedProbes: ['nutanix.clusters.list'],
      });
    }
  }

  // Hosts in maintenance
  const hostsData = probeData<{ hosts?: HostInfo[]; totalCount?: number }>(hostsR);
  const hosts = hostsData?.hosts ?? [];
  const maintenanceHosts = hosts.filter((h) => h.maintenanceMode);
  if (maintenanceHosts.length > 0) {
    findings.push({
      severity: 'warning',
      title: `${maintenanceHosts.length} host(s) in maintenance mode`,
      detail: `Hosts in maintenance: ${maintenanceHosts.map((h) => h.name ?? h.extId).join(', ')}`,
      relatedProbes: ['nutanix.hosts.list'],
    });
  }

  // Critical alerts
  const alertsData = probeData<{
    bySeverity?: Record<string, number>;
    unresolvedCritical?: Array<{
      title?: unknown;
      sourceEntity?: unknown;
      creationTime?: unknown;
    }>;
    totalCount?: number;
  }>(alertsR);
  const criticalCount = alertsData?.bySeverity?.CRITICAL ?? 0;
  const unresolvedCritical = alertsData?.unresolvedCritical ?? [];

  if (unresolvedCritical.length > 0) {
    // Itemize at most 5 alerts; the remainder are rolled up into a
    // single aggregate finding below to keep output bounded.
    for (const alert of unresolvedCritical.slice(0, 5)) {
      findings.push({
        severity: 'critical',
        title: `Unresolved critical alert: ${alert.title ?? 'Unknown'}`,
        detail: `Source: ${JSON.stringify(alert.sourceEntity)}, Created: ${alert.creationTime ?? 'unknown'}`,
        remediation: 'Review and resolve in Prism Central Alerts dashboard',
        relatedProbes: ['nutanix.alerts.summary'],
      });
    }
    if (unresolvedCritical.length > 5) {
      findings.push({
        severity: 'critical',
        title: `${unresolvedCritical.length - 5} additional unresolved critical alerts`,
        detail: `Total unresolved critical alerts: ${unresolvedCritical.length}`,
        relatedProbes: ['nutanix.alerts.summary'],
      });
    }
  }

  // Storage containers >85%
  // The threshold itself is applied upstream by the probe (highUsage flag).
  const storageData = probeData<{ containers?: ContainerInfo[]; totalCount?: number }>(storageR);
  const containers = storageData?.containers ?? [];
  for (const c of containers) {
    if (c.highUsage) {
      findings.push({
        severity: 'warning',
        title: `Storage container ${c.name} at ${c.usedPct}%`,
        detail: `Container "${c.name}" is running low on capacity`,
        remediation: 'Consider expanding storage or migrating VMs to less utilized containers',
        relatedProbes: ['nutanix.storage.containers'],
      });
    }
  }

  // Failed/long-running tasks
  const tasksData = probeData<{ tasks?: TaskInfo[]; totalCount?: number }>(tasksR);
  const tasks = tasksData?.tasks ?? [];
  const failedTasks = tasks.filter((t) => t.isFailed);
  const longRunning = tasks.filter((t) => t.isLongRunning);

  if (failedTasks.length > 0) {
    findings.push({
      severity: 'warning',
      title: `${failedTasks.length} failed task(s) in last 24 hours`,
      detail: failedTasks
        .slice(0, 5)
        .map((t) => `${t.type}: ${t.errorMessage ?? t.status}`)
        .join('; '),
      relatedProbes: ['nutanix.tasks.recent'],
    });
  }

  if (longRunning.length > 0) {
    findings.push({
      severity: 'warning',
      title: `${longRunning.length} long-running task(s) (>1 hour)`,
      detail: longRunning
        .slice(0, 5)
        .map((t) => `${t.type} started ${t.startTime}`)
        .join('; '),
      relatedProbes: ['nutanix.tasks.recent'],
    });
  }

  // Available LCM updates
  const lifecycleData = probeData<{
    updatableCount?: number;
    warnings?: string[];
  }>(lifecycleR);
  if (lifecycleData?.updatableCount && lifecycleData.updatableCount > 0) {
    findings.push({
      severity: 'info',
      title: `${lifecycleData.updatableCount} component update(s) available`,
      detail: (lifecycleData.warnings ?? []).join('; '),
      relatedProbes: ['nutanix.lifecycle.status'],
    });
  }

  // All clear
  // Only emitted when no other finding (of any severity) was produced.
  if (findings.length === 0) {
    findings.push({
      severity: 'info',
      title: 'Nutanix environment is healthy',
      detail: `${clusters.length} cluster(s), ${hosts.length} host(s), ${criticalCount} critical alert(s), all storage within thresholds`,
      relatedProbes: [
        'nutanix.clusters.list',
        'nutanix.hosts.list',
        'nutanix.alerts.summary',
        'nutanix.storage.containers',
      ],
    });
  }

  // Issues = non-info findings; surfaced in the one-line summary.
  const clusterNames = clusters.map((c) => c.name ?? 'unknown').join(', ');
  const issueCount = findings.filter((f) => f.severity !== 'info').length;
  return buildResult(
    'nutanix-cluster-health',
    findings,
    probeResults,
    startTime,
    `${clusters.length} cluster(s) [${clusterNames}], ${hosts.length} host(s), ${issueCount} issue(s)`,
  );
}
|
|
280
|
+
|
|
281
|
+
// --- 2. VM Health Runbook ---
|
|
282
|
+
|
|
283
|
+
async function vmHealthHandler(
|
|
284
|
+
params: Record<string, unknown>,
|
|
285
|
+
runProbe: RunProbe,
|
|
286
|
+
_context: RunbookContext,
|
|
287
|
+
): Promise<DiagnosticRunbookResult> {
|
|
288
|
+
const startTime = Date.now();
|
|
289
|
+
const findings: DiagnosticFinding[] = [];
|
|
290
|
+
const probeResults: Record<string, RunbookProbeResult> = {};
|
|
291
|
+
|
|
292
|
+
let vmId = params.vm_id as string | undefined;
|
|
293
|
+
const vmName = params.vm_name as string | undefined;
|
|
294
|
+
|
|
295
|
+
// Step 1: resolve vm_name → vm_id if needed
|
|
296
|
+
if (!vmId && vmName) {
|
|
297
|
+
const searchR = await runProbe('nutanix.vms.list', { name: vmName });
|
|
298
|
+
storeResult(probeResults, searchR);
|
|
299
|
+
const searchData = probeData<{ vms?: VmInfo[] }>(searchR);
|
|
300
|
+
const found = searchData?.vms?.[0];
|
|
301
|
+
if (!found?.extId) {
|
|
302
|
+
findings.push({
|
|
303
|
+
severity: 'critical',
|
|
304
|
+
title: `VM "${vmName}" not found`,
|
|
305
|
+
detail: searchR.error ?? `No VM matching name "${vmName}" was found`,
|
|
306
|
+
relatedProbes: ['nutanix.vms.list'],
|
|
307
|
+
});
|
|
308
|
+
return buildResult(
|
|
309
|
+
'nutanix-vm-health',
|
|
310
|
+
findings,
|
|
311
|
+
probeResults,
|
|
312
|
+
startTime,
|
|
313
|
+
`VM "${vmName}" not found`,
|
|
314
|
+
);
|
|
315
|
+
}
|
|
316
|
+
vmId = found.extId as string;
|
|
317
|
+
}
|
|
318
|
+
|
|
319
|
+
if (!vmId) {
|
|
320
|
+
findings.push({
|
|
321
|
+
severity: 'critical',
|
|
322
|
+
title: 'No VM specified',
|
|
323
|
+
detail: 'Either vm_id or vm_name must be provided',
|
|
324
|
+
relatedProbes: [],
|
|
325
|
+
});
|
|
326
|
+
return buildResult('nutanix-vm-health', findings, probeResults, startTime, 'No VM specified');
|
|
327
|
+
}
|
|
328
|
+
|
|
329
|
+
// Step 2: parallel VM probes
|
|
330
|
+
const [detailR, statsR, snapshotsR, alertsR, protectionR] = await Promise.all([
|
|
331
|
+
runProbe('nutanix.vm.detail', { vm_id: vmId }).then((r) => storeResult(probeResults, r)),
|
|
332
|
+
runProbe('nutanix.vm.stats', { vm_id: vmId }).then((r) => storeResult(probeResults, r)),
|
|
333
|
+
runProbe('nutanix.vm.snapshots', { vm_id: vmId }).then((r) => storeResult(probeResults, r)),
|
|
334
|
+
runProbe('nutanix.alerts.list', { entity_type: 'vm', resolved: false, limit: 20 }).then((r) =>
|
|
335
|
+
storeResult(probeResults, r),
|
|
336
|
+
),
|
|
337
|
+
runProbe('nutanix.protection.policies', { vm_id: vmId }).then((r) =>
|
|
338
|
+
storeResult(probeResults, r),
|
|
339
|
+
),
|
|
340
|
+
]);
|
|
341
|
+
|
|
342
|
+
// --- Analysis ---
|
|
343
|
+
|
|
344
|
+
// VM detail
|
|
345
|
+
const vmData = probeData<{
|
|
346
|
+
name?: string | null;
|
|
347
|
+
extId?: string | null;
|
|
348
|
+
powerState?: string | null;
|
|
349
|
+
numSockets?: number | null;
|
|
350
|
+
numCoresPerSocket?: number | null;
|
|
351
|
+
memorySizeMb?: number | null;
|
|
352
|
+
guestTools?: Record<string, unknown> | null;
|
|
353
|
+
categories?: unknown;
|
|
354
|
+
clusterExtId?: string | null;
|
|
355
|
+
hostExtId?: string | null;
|
|
356
|
+
}>(detailR);
|
|
357
|
+
|
|
358
|
+
if (!vmData) {
|
|
359
|
+
findings.push({
|
|
360
|
+
severity: 'critical',
|
|
361
|
+
title: `VM ${vmId} unreachable`,
|
|
362
|
+
detail: detailR.error ?? 'Could not retrieve VM details',
|
|
363
|
+
relatedProbes: ['nutanix.vm.detail'],
|
|
364
|
+
});
|
|
365
|
+
return buildResult(
|
|
366
|
+
'nutanix-vm-health',
|
|
367
|
+
findings,
|
|
368
|
+
probeResults,
|
|
369
|
+
startTime,
|
|
370
|
+
`VM ${vmId} unreachable`,
|
|
371
|
+
);
|
|
372
|
+
}
|
|
373
|
+
|
|
374
|
+
const vmLabel = vmData.name ?? vmId;
|
|
375
|
+
|
|
376
|
+
// Power state
|
|
377
|
+
if (vmData.powerState === 'OFF') {
|
|
378
|
+
findings.push({
|
|
379
|
+
severity: 'warning',
|
|
380
|
+
title: `VM ${vmLabel} is powered off`,
|
|
381
|
+
detail: `VM "${vmLabel}" is not running`,
|
|
382
|
+
relatedProbes: ['nutanix.vm.detail'],
|
|
383
|
+
});
|
|
384
|
+
}
|
|
385
|
+
|
|
386
|
+
// Performance stats
|
|
387
|
+
const statsData = probeData<{
|
|
388
|
+
cpuUsagePct?: number | null;
|
|
389
|
+
memoryUsagePct?: number | null;
|
|
390
|
+
iops?: number | null;
|
|
391
|
+
avgIoLatencyMs?: number | null;
|
|
392
|
+
}>(statsR);
|
|
393
|
+
|
|
394
|
+
if (statsData) {
|
|
395
|
+
if (statsData.cpuUsagePct != null && statsData.cpuUsagePct > 85) {
|
|
396
|
+
findings.push({
|
|
397
|
+
severity: 'warning',
|
|
398
|
+
title: `VM ${vmLabel} CPU at ${statsData.cpuUsagePct}%`,
|
|
399
|
+
detail: 'CPU utilization is high. Consider adding vCPUs or investigating workload.',
|
|
400
|
+
relatedProbes: ['nutanix.vm.stats'],
|
|
401
|
+
});
|
|
402
|
+
}
|
|
403
|
+
|
|
404
|
+
if (statsData.memoryUsagePct != null && statsData.memoryUsagePct > 90) {
|
|
405
|
+
findings.push({
|
|
406
|
+
severity: 'warning',
|
|
407
|
+
title: `VM ${vmLabel} memory at ${statsData.memoryUsagePct}%`,
|
|
408
|
+
detail: 'Memory utilization is critically high. Consider adding memory.',
|
|
409
|
+
relatedProbes: ['nutanix.vm.stats'],
|
|
410
|
+
});
|
|
411
|
+
}
|
|
412
|
+
|
|
413
|
+
if (statsData.avgIoLatencyMs != null && statsData.avgIoLatencyMs > 20) {
|
|
414
|
+
findings.push({
|
|
415
|
+
severity: 'warning',
|
|
416
|
+
title: `VM ${vmLabel} I/O latency at ${statsData.avgIoLatencyMs}ms`,
|
|
417
|
+
detail: 'Average I/O latency exceeds 20ms threshold. Check storage performance.',
|
|
418
|
+
relatedProbes: ['nutanix.vm.stats'],
|
|
419
|
+
});
|
|
420
|
+
}
|
|
421
|
+
}
|
|
422
|
+
|
|
423
|
+
// Protection policies
|
|
424
|
+
const protectionData = probeData<{
|
|
425
|
+
vmCovered?: boolean;
|
|
426
|
+
policies?: Array<{ name?: string | null }>;
|
|
427
|
+
totalCount?: number;
|
|
428
|
+
}>(protectionR);
|
|
429
|
+
|
|
430
|
+
if (protectionData && protectionData.vmCovered === false) {
|
|
431
|
+
findings.push({
|
|
432
|
+
severity: 'warning',
|
|
433
|
+
title: `VM ${vmLabel} has no data protection`,
|
|
434
|
+
detail: 'No protection policies cover this VM. It is not being backed up.',
|
|
435
|
+
remediation: 'Add this VM to a protection policy in Prism Central Data Protection',
|
|
436
|
+
relatedProbes: ['nutanix.protection.policies'],
|
|
437
|
+
});
|
|
438
|
+
}
|
|
439
|
+
|
|
440
|
+
// Snapshots
|
|
441
|
+
const snapshotsData = probeData<{
|
|
442
|
+
snapshots?: SnapshotInfo[];
|
|
443
|
+
totalCount?: number;
|
|
444
|
+
warnings?: string[];
|
|
445
|
+
}>(snapshotsR);
|
|
446
|
+
|
|
447
|
+
if (snapshotsData) {
|
|
448
|
+
const oldSnapshots = (snapshotsData.snapshots ?? []).filter((s) => s.isOld);
|
|
449
|
+
const expiredSnapshots = (snapshotsData.snapshots ?? []).filter((s) => s.isExpired);
|
|
450
|
+
|
|
451
|
+
if (oldSnapshots.length > 0) {
|
|
452
|
+
findings.push({
|
|
453
|
+
severity: 'info',
|
|
454
|
+
title: `${oldSnapshots.length} snapshot(s) older than 7 days`,
|
|
455
|
+
detail: oldSnapshots.map((s) => `${s.name ?? 'unnamed'}: ${s.ageDays} days old`).join('; '),
|
|
456
|
+
remediation: 'Review and clean up old snapshots to reclaim storage',
|
|
457
|
+
relatedProbes: ['nutanix.vm.snapshots'],
|
|
458
|
+
});
|
|
459
|
+
}
|
|
460
|
+
|
|
461
|
+
if (expiredSnapshots.length > 0) {
|
|
462
|
+
findings.push({
|
|
463
|
+
severity: 'warning',
|
|
464
|
+
title: `${expiredSnapshots.length} expired snapshot(s) not cleaned up`,
|
|
465
|
+
detail: 'Expired recovery points should be removed to reclaim storage',
|
|
466
|
+
remediation: 'Delete expired snapshots via Prism Central',
|
|
467
|
+
relatedProbes: ['nutanix.vm.snapshots'],
|
|
468
|
+
});
|
|
469
|
+
}
|
|
470
|
+
}
|
|
471
|
+
|
|
472
|
+
// Guest tools
|
|
473
|
+
if (vmData.guestTools) {
|
|
474
|
+
const gtEnabled =
|
|
475
|
+
(vmData.guestTools as Record<string, unknown>).isEnabled ??
|
|
476
|
+
(vmData.guestTools as Record<string, unknown>).ngtState;
|
|
477
|
+
if (!gtEnabled || gtEnabled === 'UNINSTALLED') {
|
|
478
|
+
findings.push({
|
|
479
|
+
severity: 'info',
|
|
480
|
+
title: `Guest tools not installed on ${vmLabel}`,
|
|
481
|
+
detail: 'Nutanix Guest Tools (NGT) are not installed. Some features may be unavailable.',
|
|
482
|
+
remediation: 'Install NGT from Prism Central VM management',
|
|
483
|
+
relatedProbes: ['nutanix.vm.detail'],
|
|
484
|
+
});
|
|
485
|
+
}
|
|
486
|
+
}
|
|
487
|
+
|
|
488
|
+
// VM-related alerts
|
|
489
|
+
const alertsData = probeData<{ alerts?: AlertInfo[]; totalCount?: number }>(alertsR);
|
|
490
|
+
const vmAlerts = (alertsData?.alerts ?? []).filter(
|
|
491
|
+
(a) => a.sourceEntity?.extId === vmId || a.sourceEntity?.name === vmData.name,
|
|
492
|
+
);
|
|
493
|
+
if (vmAlerts.length > 0) {
|
|
494
|
+
for (const alert of vmAlerts.slice(0, 3)) {
|
|
495
|
+
findings.push({
|
|
496
|
+
severity: alert.severity === 'CRITICAL' ? 'critical' : 'warning',
|
|
497
|
+
title: `Alert: ${alert.title ?? 'Unknown'}`,
|
|
498
|
+
detail: `Source: ${alert.sourceEntity?.name ?? 'unknown'}, Created: ${alert.creationTime ?? 'unknown'}`,
|
|
499
|
+
relatedProbes: ['nutanix.alerts.list'],
|
|
500
|
+
});
|
|
501
|
+
}
|
|
502
|
+
}
|
|
503
|
+
|
|
504
|
+
// All clear
|
|
505
|
+
if (findings.length === 0) {
|
|
506
|
+
findings.push({
|
|
507
|
+
severity: 'info',
|
|
508
|
+
title: `VM ${vmLabel} is healthy`,
|
|
509
|
+
detail: 'VM is running, performance within thresholds, data protection in place',
|
|
510
|
+
relatedProbes: ['nutanix.vm.detail', 'nutanix.vm.stats', 'nutanix.protection.policies'],
|
|
511
|
+
});
|
|
512
|
+
}
|
|
513
|
+
|
|
514
|
+
return buildResult(
|
|
515
|
+
'nutanix-vm-health',
|
|
516
|
+
findings,
|
|
517
|
+
probeResults,
|
|
518
|
+
startTime,
|
|
519
|
+
`VM ${vmLabel} health check: ${findings.length} finding(s)`,
|
|
520
|
+
);
|
|
521
|
+
}
|
|
522
|
+
|
|
523
|
+
// --- 3. Capacity Planning Runbook ---

/**
 * Capacity and headroom analysis across every reachable Nutanix cluster.
 *
 * Flow: list all clusters, then for each cluster fetch stats, storage
 * containers, hosts, and powered-on VMs (the four probes run in parallel
 * per cluster; clusters themselves are processed sequentially so findings
 * stay in cluster order). Flags CPU/memory/storage above 80% (critical
 * above 90%), hot storage containers, and high VM-per-host density.
 *
 * @param _params  Unused — this runbook takes no parameters.
 * @param runProbe Probe executor supplied by the runbook framework.
 * @param _context Unused runbook context.
 * @returns Diagnostic result with findings and the raw probe results.
 */
async function capacityPlanningHandler(
  _params: Record<string, unknown>,
  runProbe: RunProbe,
  _context: RunbookContext,
): Promise<DiagnosticRunbookResult> {
  const startTime = Date.now();
  const findings: DiagnosticFinding[] = [];
  const probeResults: Record<string, RunbookProbeResult> = {};

  // Step 1: get all clusters
  const clustersR = await runProbe('nutanix.clusters.list');
  storeResult(probeResults, clustersR);

  const clustersData = probeData<{ clusters?: ClusterInfo[] }>(clustersR);
  const clusters = clustersData?.clusters ?? [];

  // No clusters means either an empty environment or a failed probe;
  // either way there is nothing to analyze, so bail out as critical.
  if (clusters.length === 0) {
    findings.push({
      severity: 'critical',
      title: 'No clusters found',
      detail: clustersR.error ?? 'Could not retrieve cluster information',
      relatedProbes: ['nutanix.clusters.list'],
    });
    return buildResult(
      'nutanix-capacity-planning',
      findings,
      probeResults,
      startTime,
      'No clusters found',
    );
  }

  // Step 2: per-cluster stats, storage, hosts, VMs (parallel per cluster)
  for (const cluster of clusters) {
    const clusterId = cluster.extId as string;
    // Skip entries without an extId — we cannot target probes at them.
    if (!clusterId) continue;

    const clusterLabel = (cluster.name as string) ?? clusterId;

    // The `probe` field is overridden with a `:${clusterLabel}` suffix,
    // presumably so storeResult keys each cluster's entry separately in
    // probeResults — NOTE(review): confirm storeResult keys by `probe`.
    const [statsR, storageR, hostsR, vmsR] = await Promise.all([
      runProbe('nutanix.cluster.stats', { cluster_id: clusterId }).then((r) =>
        storeResult(probeResults, { ...r, probe: `nutanix.cluster.stats:${clusterLabel}` }),
      ),
      runProbe('nutanix.storage.containers', { cluster_id: clusterId }).then((r) =>
        storeResult(probeResults, { ...r, probe: `nutanix.storage.containers:${clusterLabel}` }),
      ),
      runProbe('nutanix.hosts.list', { cluster_id: clusterId }).then((r) =>
        storeResult(probeResults, { ...r, probe: `nutanix.hosts.list:${clusterLabel}` }),
      ),
      runProbe('nutanix.vms.list', { cluster_id: clusterId, power_state: 'ON', limit: 500 }).then(
        (r) => storeResult(probeResults, { ...r, probe: `nutanix.vms.list:${clusterLabel}` }),
      ),
    ]);

    // Cluster resource utilization
    const stats = probeData<{
      cpuUsagePct?: number | null;
      memoryUsagePct?: number | null;
      storageUsagePct?: number | null;
      cpuCapacityHz?: number | null;
      cpuUsedHz?: number | null;
      memoryCapacityBytes?: number | null;
      memoryUsedBytes?: number | null;
      storageCapacityBytes?: number | null;
      storageUsedBytes?: number | null;
    }>(statsR);

    if (stats) {
      // CPU headroom — warn above 80%, escalate to critical above 90%.
      if (stats.cpuUsagePct != null && stats.cpuUsagePct > 80) {
        findings.push({
          severity: stats.cpuUsagePct > 90 ? 'critical' : 'warning',
          title: `Cluster ${clusterLabel} CPU at ${stats.cpuUsagePct}%`,
          detail: `CPU capacity is ${stats.cpuUsagePct > 90 ? 'critically' : 'significantly'} consumed`,
          remediation: 'Add nodes or migrate workloads to less utilized clusters',
          relatedProbes: [`nutanix.cluster.stats:${clusterLabel}`],
        });
      }

      // Memory headroom — same 80%/90% thresholds as CPU.
      if (stats.memoryUsagePct != null && stats.memoryUsagePct > 80) {
        findings.push({
          severity: stats.memoryUsagePct > 90 ? 'critical' : 'warning',
          title: `Cluster ${clusterLabel} memory at ${stats.memoryUsagePct}%`,
          detail: `Memory capacity is ${stats.memoryUsagePct > 90 ? 'critically' : 'significantly'} consumed`,
          remediation: 'Add memory or migrate VMs to other clusters',
          relatedProbes: [`nutanix.cluster.stats:${clusterLabel}`],
        });
      }

      // Storage headroom — same 80%/90% thresholds.
      if (stats.storageUsagePct != null && stats.storageUsagePct > 80) {
        findings.push({
          severity: stats.storageUsagePct > 90 ? 'critical' : 'warning',
          title: `Cluster ${clusterLabel} storage at ${stats.storageUsagePct}%`,
          detail: `Storage capacity is ${stats.storageUsagePct > 90 ? 'critically' : 'significantly'} consumed`,
          remediation: 'Add storage nodes, enable deduplication/compression, or archive cold data',
          relatedProbes: [`nutanix.cluster.stats:${clusterLabel}`],
        });
      }
    }

    // Per-container storage analysis — `highUsage` is computed upstream by
    // the probe (the detail string says it marks >85% utilization).
    const containerData = probeData<{ containers?: ContainerInfo[] }>(storageR);
    for (const c of containerData?.containers ?? []) {
      if (c.highUsage) {
        findings.push({
          severity: 'warning',
          title: `Container ${c.name} on ${clusterLabel} at ${c.usedPct}%`,
          detail: 'Storage container is above 85% utilization',
          relatedProbes: [`nutanix.storage.containers:${clusterLabel}`],
        });
      }
    }

    // Host count vs VM density
    const hostsData = probeData<{ hosts?: HostInfo[] }>(hostsR);
    const vmsData = probeData<{ vms?: VmInfo[]; totalCount?: number }>(vmsR);
    const hostCount = hostsData?.hosts?.length ?? 0;
    // Prefer the probe's totalCount (page-independent) over the page length.
    const vmCount = vmsData?.totalCount ?? vmsData?.vms?.length ?? 0;

    if (hostCount > 0 && vmCount > 0) {
      // Rounded to one decimal place for readable reporting.
      const vmPerHost = Math.round((vmCount / hostCount) * 10) / 10;
      if (vmPerHost > 30) {
        findings.push({
          severity: 'warning',
          title: `High VM density on ${clusterLabel}: ${vmPerHost} VMs/host`,
          detail: `${vmCount} running VMs across ${hostCount} hosts. High density may impact performance.`,
          relatedProbes: [`nutanix.hosts.list:${clusterLabel}`, `nutanix.vms.list:${clusterLabel}`],
        });
      }
    }
  }

  // All clear — emit a single informational finding so the result is
  // never empty.
  if (findings.length === 0) {
    findings.push({
      severity: 'info',
      title: 'Capacity is within healthy thresholds',
      detail: `${clusters.length} cluster(s) analyzed, all below 80% on CPU, memory, and storage`,
      relatedProbes: ['nutanix.clusters.list'],
    });
  }

  // Summary counts only non-info findings as actual capacity concerns.
  const issueCount = findings.filter((f) => f.severity !== 'info').length;
  return buildResult(
    'nutanix-capacity-planning',
    findings,
    probeResults,
    startTime,
    `${clusters.length} cluster(s) analyzed, ${issueCount} capacity concern(s)`,
  );
}
|
|
678
|
+
|
|
679
|
+
// --- 4. Storefront Investigation Runbook ---
|
|
680
|
+
|
|
681
|
+
async function storefrontInvestigateHandler(
|
|
682
|
+
params: Record<string, unknown>,
|
|
683
|
+
runProbe: RunProbe,
|
|
684
|
+
_context: RunbookContext,
|
|
685
|
+
): Promise<DiagnosticRunbookResult> {
|
|
686
|
+
const startTime = Date.now();
|
|
687
|
+
const findings: DiagnosticFinding[] = [];
|
|
688
|
+
const probeResults: Record<string, RunbookProbeResult> = {};
|
|
689
|
+
|
|
690
|
+
const categoryKey = params.category_key as string;
|
|
691
|
+
const categoryValue = params.category_value as string;
|
|
692
|
+
|
|
693
|
+
if (!categoryKey || !categoryValue) {
|
|
694
|
+
findings.push({
|
|
695
|
+
severity: 'critical',
|
|
696
|
+
title: 'Missing category parameters',
|
|
697
|
+
detail: 'Both category_key and category_value must be provided',
|
|
698
|
+
relatedProbes: [],
|
|
699
|
+
});
|
|
700
|
+
return buildResult(
|
|
701
|
+
'nutanix-storefront-investigate',
|
|
702
|
+
findings,
|
|
703
|
+
probeResults,
|
|
704
|
+
startTime,
|
|
705
|
+
'Missing category parameters',
|
|
706
|
+
);
|
|
707
|
+
}
|
|
708
|
+
|
|
709
|
+
// Step 1: find entities tagged with this category
|
|
710
|
+
const entitiesR = await runProbe('nutanix.categories.entities', {
|
|
711
|
+
key: categoryKey,
|
|
712
|
+
value: categoryValue,
|
|
713
|
+
});
|
|
714
|
+
storeResult(probeResults, entitiesR);
|
|
715
|
+
|
|
716
|
+
const entitiesData = probeData<{
|
|
717
|
+
entities?: Array<{ entityType: string; entityId: string; entityName?: string | null }>;
|
|
718
|
+
totalCount?: number;
|
|
719
|
+
}>(entitiesR);
|
|
720
|
+
|
|
721
|
+
const entities = entitiesData?.entities ?? [];
|
|
722
|
+
const vmEntities = entities.filter((e) => e.entityType === 'vm');
|
|
723
|
+
|
|
724
|
+
if (entities.length === 0) {
|
|
725
|
+
findings.push({
|
|
726
|
+
severity: 'info',
|
|
727
|
+
title: `No entities tagged ${categoryKey}:${categoryValue}`,
|
|
728
|
+
detail: 'No VMs or other entities found with this category assignment',
|
|
729
|
+
relatedProbes: ['nutanix.categories.entities'],
|
|
730
|
+
});
|
|
731
|
+
return buildResult(
|
|
732
|
+
'nutanix-storefront-investigate',
|
|
733
|
+
findings,
|
|
734
|
+
probeResults,
|
|
735
|
+
startTime,
|
|
736
|
+
`No entities tagged ${categoryKey}:${categoryValue}`,
|
|
737
|
+
);
|
|
738
|
+
}
|
|
739
|
+
|
|
740
|
+
if (vmEntities.length === 0) {
|
|
741
|
+
findings.push({
|
|
742
|
+
severity: 'info',
|
|
743
|
+
title: `${entities.length} entity(ies) found but no VMs`,
|
|
744
|
+
detail: `Entities tagged ${categoryKey}:${categoryValue}: ${entities.map((e) => `${e.entityType}:${e.entityName ?? e.entityId}`).join(', ')}`,
|
|
745
|
+
relatedProbes: ['nutanix.categories.entities'],
|
|
746
|
+
});
|
|
747
|
+
return buildResult(
|
|
748
|
+
'nutanix-storefront-investigate',
|
|
749
|
+
findings,
|
|
750
|
+
probeResults,
|
|
751
|
+
startTime,
|
|
752
|
+
`${entities.length} entity(ies) found, 0 VMs`,
|
|
753
|
+
);
|
|
754
|
+
}
|
|
755
|
+
|
|
756
|
+
// Step 2: parallel detail + stats for each VM (limit to 10 for performance)
|
|
757
|
+
const vmsToCheck = vmEntities.slice(0, 10);
|
|
758
|
+
const vmResults = await Promise.all(
|
|
759
|
+
vmsToCheck.map(async (vm) => {
|
|
760
|
+
const [detailR, statsR] = await Promise.all([
|
|
761
|
+
runProbe('nutanix.vm.detail', { vm_id: vm.entityId }).then((r) =>
|
|
762
|
+
storeResult(probeResults, {
|
|
763
|
+
...r,
|
|
764
|
+
probe: `nutanix.vm.detail:${vm.entityName ?? vm.entityId}`,
|
|
765
|
+
}),
|
|
766
|
+
),
|
|
767
|
+
runProbe('nutanix.vm.stats', { vm_id: vm.entityId }).then((r) =>
|
|
768
|
+
storeResult(probeResults, {
|
|
769
|
+
...r,
|
|
770
|
+
probe: `nutanix.vm.stats:${vm.entityName ?? vm.entityId}`,
|
|
771
|
+
}),
|
|
772
|
+
),
|
|
773
|
+
]);
|
|
774
|
+
return { vm, detailR, statsR };
|
|
775
|
+
}),
|
|
776
|
+
);
|
|
777
|
+
|
|
778
|
+
// Step 3: get alerts for the environment
|
|
779
|
+
const alertsR = await runProbe('nutanix.alerts.list', {
|
|
780
|
+
resolved: false,
|
|
781
|
+
severity: 'CRITICAL',
|
|
782
|
+
limit: 50,
|
|
783
|
+
});
|
|
784
|
+
storeResult(probeResults, alertsR);
|
|
785
|
+
|
|
786
|
+
// --- Analysis per VM ---
|
|
787
|
+
|
|
788
|
+
let poweredOffCount = 0;
|
|
789
|
+
let highCpuCount = 0;
|
|
790
|
+
let highMemCount = 0;
|
|
791
|
+
|
|
792
|
+
for (const { vm, detailR, statsR } of vmResults) {
|
|
793
|
+
const vmLabel = vm.entityName ?? vm.entityId;
|
|
794
|
+
const detail = probeData<{
|
|
795
|
+
name?: string | null;
|
|
796
|
+
powerState?: string | null;
|
|
797
|
+
guestTools?: Record<string, unknown> | null;
|
|
798
|
+
}>(detailR);
|
|
799
|
+
const stats = probeData<{
|
|
800
|
+
cpuUsagePct?: number | null;
|
|
801
|
+
memoryUsagePct?: number | null;
|
|
802
|
+
avgIoLatencyMs?: number | null;
|
|
803
|
+
}>(statsR);
|
|
804
|
+
|
|
805
|
+
if (detail?.powerState === 'OFF') {
|
|
806
|
+
poweredOffCount++;
|
|
807
|
+
findings.push({
|
|
808
|
+
severity: 'warning',
|
|
809
|
+
title: `${vmLabel} is powered off`,
|
|
810
|
+
detail: `VM tagged ${categoryKey}:${categoryValue} is not running`,
|
|
811
|
+
relatedProbes: [`nutanix.vm.detail:${vmLabel}`],
|
|
812
|
+
});
|
|
813
|
+
continue;
|
|
814
|
+
}
|
|
815
|
+
|
|
816
|
+
if (stats) {
|
|
817
|
+
if (stats.cpuUsagePct != null && stats.cpuUsagePct > 85) {
|
|
818
|
+
highCpuCount++;
|
|
819
|
+
findings.push({
|
|
820
|
+
severity: 'warning',
|
|
821
|
+
title: `${vmLabel} CPU at ${stats.cpuUsagePct}%`,
|
|
822
|
+
detail: `High CPU on ${categoryKey}:${categoryValue} tagged VM`,
|
|
823
|
+
relatedProbes: [`nutanix.vm.stats:${vmLabel}`],
|
|
824
|
+
});
|
|
825
|
+
}
|
|
826
|
+
|
|
827
|
+
if (stats.memoryUsagePct != null && stats.memoryUsagePct > 90) {
|
|
828
|
+
highMemCount++;
|
|
829
|
+
findings.push({
|
|
830
|
+
severity: 'warning',
|
|
831
|
+
title: `${vmLabel} memory at ${stats.memoryUsagePct}%`,
|
|
832
|
+
detail: `High memory on ${categoryKey}:${categoryValue} tagged VM`,
|
|
833
|
+
relatedProbes: [`nutanix.vm.stats:${vmLabel}`],
|
|
834
|
+
});
|
|
835
|
+
}
|
|
836
|
+
|
|
837
|
+
if (stats.avgIoLatencyMs != null && stats.avgIoLatencyMs > 20) {
|
|
838
|
+
findings.push({
|
|
839
|
+
severity: 'warning',
|
|
840
|
+
title: `${vmLabel} I/O latency at ${stats.avgIoLatencyMs}ms`,
|
|
841
|
+
detail: `High I/O latency on ${categoryKey}:${categoryValue} tagged VM`,
|
|
842
|
+
relatedProbes: [`nutanix.vm.stats:${vmLabel}`],
|
|
843
|
+
});
|
|
844
|
+
}
|
|
845
|
+
}
|
|
846
|
+
}
|
|
847
|
+
|
|
848
|
+
// Cross-reference alerts with tagged VMs
|
|
849
|
+
const alertsDataInv = probeData<{ alerts?: AlertInfo[] }>(alertsR);
|
|
850
|
+
const vmIds = new Set(vmsToCheck.map((v) => v.entityId));
|
|
851
|
+
const vmNames = new Set(vmsToCheck.map((v) => v.entityName).filter(Boolean));
|
|
852
|
+
|
|
853
|
+
const relatedAlerts = (alertsDataInv?.alerts ?? []).filter(
|
|
854
|
+
(a) =>
|
|
855
|
+
(a.sourceEntity?.extId && vmIds.has(a.sourceEntity.extId)) ||
|
|
856
|
+
(a.sourceEntity?.name && vmNames.has(a.sourceEntity.name)),
|
|
857
|
+
);
|
|
858
|
+
|
|
859
|
+
for (const alert of relatedAlerts.slice(0, 5)) {
|
|
860
|
+
findings.push({
|
|
861
|
+
severity: 'critical',
|
|
862
|
+
title: `Critical alert on ${alert.sourceEntity?.name ?? 'tagged VM'}: ${alert.title}`,
|
|
863
|
+
detail: `Alert on ${categoryKey}:${categoryValue} tagged entity`,
|
|
864
|
+
relatedProbes: ['nutanix.alerts.list'],
|
|
865
|
+
});
|
|
866
|
+
}
|
|
867
|
+
|
|
868
|
+
// Summary-level findings
|
|
869
|
+
if (poweredOffCount > 0 && poweredOffCount === vmsToCheck.length) {
|
|
870
|
+
findings.push({
|
|
871
|
+
severity: 'critical',
|
|
872
|
+
title: `All ${categoryKey}:${categoryValue} VMs are powered off`,
|
|
873
|
+
detail: `All ${poweredOffCount} VMs tagged with this category are not running`,
|
|
874
|
+
relatedProbes: ['nutanix.categories.entities'],
|
|
875
|
+
});
|
|
876
|
+
}
|
|
877
|
+
|
|
878
|
+
// All clear
|
|
879
|
+
if (findings.length === 0) {
|
|
880
|
+
findings.push({
|
|
881
|
+
severity: 'info',
|
|
882
|
+
title: `All ${categoryKey}:${categoryValue} VMs are healthy`,
|
|
883
|
+
detail: `${vmsToCheck.length} VM(s) checked, all running with performance within thresholds`,
|
|
884
|
+
relatedProbes: ['nutanix.categories.entities'],
|
|
885
|
+
});
|
|
886
|
+
}
|
|
887
|
+
|
|
888
|
+
const issueCount = findings.filter((f) => f.severity !== 'info').length;
|
|
889
|
+
return buildResult(
|
|
890
|
+
'nutanix-storefront-investigate',
|
|
891
|
+
findings,
|
|
892
|
+
probeResults,
|
|
893
|
+
startTime,
|
|
894
|
+
`${vmsToCheck.length} VM(s) tagged ${categoryKey}:${categoryValue}, ${issueCount} issue(s)`,
|
|
895
|
+
);
|
|
896
|
+
}
|
|
897
|
+
|
|
898
|
+
// --- Export ---
|
|
899
|
+
|
|
900
|
+
export const nutanixDiagnosticRunbooks: DiagnosticRunbookDefinition[] = [
|
|
901
|
+
{
|
|
902
|
+
category: 'nutanix-cluster-health',
|
|
903
|
+
description:
|
|
904
|
+
'Fleet-wide Nutanix environment health overview — clusters, hosts, alerts, storage, tasks, LCM updates',
|
|
905
|
+
handler: clusterHealthHandler,
|
|
906
|
+
},
|
|
907
|
+
{
|
|
908
|
+
category: 'nutanix-vm-health',
|
|
909
|
+
description:
|
|
910
|
+
'Deep health check for a single Nutanix VM — performance, protection, snapshots, alerts',
|
|
911
|
+
params: {
|
|
912
|
+
vm_id: { type: 'string', description: 'VM extId', required: false },
|
|
913
|
+
vm_name: { type: 'string', description: 'VM name (resolved to extId)', required: false },
|
|
914
|
+
},
|
|
915
|
+
handler: vmHealthHandler,
|
|
916
|
+
},
|
|
917
|
+
{
|
|
918
|
+
category: 'nutanix-capacity-planning',
|
|
919
|
+
description:
|
|
920
|
+
'Capacity and headroom analysis across all Nutanix clusters — CPU, memory, storage utilization',
|
|
921
|
+
handler: capacityPlanningHandler,
|
|
922
|
+
},
|
|
923
|
+
{
|
|
924
|
+
category: 'nutanix-storefront-investigate',
|
|
925
|
+
description:
|
|
926
|
+
'Investigate VMs tagged with a Nutanix category — cross-reference performance, alerts, and health',
|
|
927
|
+
params: {
|
|
928
|
+
category_key: {
|
|
929
|
+
type: 'string',
|
|
930
|
+
description: 'Nutanix category key (e.g. Environment)',
|
|
931
|
+
required: true,
|
|
932
|
+
},
|
|
933
|
+
category_value: {
|
|
934
|
+
type: 'string',
|
|
935
|
+
description: 'Nutanix category value (e.g. Storefront)',
|
|
936
|
+
required: true,
|
|
937
|
+
},
|
|
938
|
+
},
|
|
939
|
+
handler: storefrontInvestigateHandler,
|
|
940
|
+
},
|
|
941
|
+
];
|