@sonde/packs 0.1.0 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +6 -0
- package/.turbo/turbo-test.log +814 -0
- package/.turbo/turbo-typecheck.log +4 -0
- package/CHANGELOG.md +10 -0
- package/dist/index.d.ts +16 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +40 -2
- package/dist/index.js.map +1 -1
- package/dist/integrations/citrix.d.ts +13 -0
- package/dist/integrations/citrix.d.ts.map +1 -0
- package/dist/integrations/citrix.js +420 -0
- package/dist/integrations/citrix.js.map +1 -0
- package/dist/integrations/citrix.test.d.ts +2 -0
- package/dist/integrations/citrix.test.d.ts.map +1 -0
- package/dist/integrations/citrix.test.js +464 -0
- package/dist/integrations/citrix.test.js.map +1 -0
- package/dist/integrations/graph.d.ts +9 -0
- package/dist/integrations/graph.d.ts.map +1 -0
- package/dist/integrations/graph.js +290 -0
- package/dist/integrations/graph.js.map +1 -0
- package/dist/integrations/graph.test.d.ts +2 -0
- package/dist/integrations/graph.test.d.ts.map +1 -0
- package/dist/integrations/graph.test.js +356 -0
- package/dist/integrations/graph.test.js.map +1 -0
- package/dist/integrations/httpbin.d.ts +3 -0
- package/dist/integrations/httpbin.d.ts.map +1 -0
- package/dist/integrations/httpbin.js +70 -0
- package/dist/integrations/httpbin.js.map +1 -0
- package/dist/integrations/nutanix.d.ts +18 -0
- package/dist/integrations/nutanix.d.ts.map +1 -0
- package/dist/integrations/nutanix.js +1121 -0
- package/dist/integrations/nutanix.js.map +1 -0
- package/dist/integrations/nutanix.test.d.ts +2 -0
- package/dist/integrations/nutanix.test.d.ts.map +1 -0
- package/dist/integrations/nutanix.test.js +978 -0
- package/dist/integrations/nutanix.test.js.map +1 -0
- package/dist/integrations/proxmox.d.ts +12 -0
- package/dist/integrations/proxmox.d.ts.map +1 -0
- package/dist/integrations/proxmox.js +733 -0
- package/dist/integrations/proxmox.js.map +1 -0
- package/dist/integrations/proxmox.test.d.ts +2 -0
- package/dist/integrations/proxmox.test.d.ts.map +1 -0
- package/dist/integrations/proxmox.test.js +697 -0
- package/dist/integrations/proxmox.test.js.map +1 -0
- package/dist/integrations/servicenow.d.ts +3 -0
- package/dist/integrations/servicenow.d.ts.map +1 -0
- package/dist/integrations/servicenow.js +257 -0
- package/dist/integrations/servicenow.js.map +1 -0
- package/dist/integrations/servicenow.test.d.ts +2 -0
- package/dist/integrations/servicenow.test.d.ts.map +1 -0
- package/dist/integrations/servicenow.test.js +217 -0
- package/dist/integrations/servicenow.test.js.map +1 -0
- package/dist/integrations/splunk.d.ts +9 -0
- package/dist/integrations/splunk.d.ts.map +1 -0
- package/dist/integrations/splunk.js +242 -0
- package/dist/integrations/splunk.js.map +1 -0
- package/dist/integrations/splunk.test.d.ts +2 -0
- package/dist/integrations/splunk.test.d.ts.map +1 -0
- package/dist/integrations/splunk.test.js +323 -0
- package/dist/integrations/splunk.test.js.map +1 -0
- package/dist/mysql/index.d.ts +3 -0
- package/dist/mysql/index.d.ts.map +1 -0
- package/dist/mysql/index.js +13 -0
- package/dist/mysql/index.js.map +1 -0
- package/dist/mysql/manifest.d.ts +3 -0
- package/dist/mysql/manifest.d.ts.map +1 -0
- package/dist/mysql/manifest.js +69 -0
- package/dist/mysql/manifest.js.map +1 -0
- package/dist/mysql/probes/databases-list.d.ts +13 -0
- package/dist/mysql/probes/databases-list.d.ts.map +1 -0
- package/dist/mysql/probes/databases-list.js +31 -0
- package/dist/mysql/probes/databases-list.js.map +1 -0
- package/dist/mysql/probes/databases-list.test.d.ts +2 -0
- package/dist/mysql/probes/databases-list.test.d.ts.map +1 -0
- package/dist/mysql/probes/databases-list.test.js +54 -0
- package/dist/mysql/probes/databases-list.test.js.map +1 -0
- package/dist/mysql/probes/processlist.d.ts +18 -0
- package/dist/mysql/probes/processlist.d.ts.map +1 -0
- package/dist/mysql/probes/processlist.js +36 -0
- package/dist/mysql/probes/processlist.js.map +1 -0
- package/dist/mysql/probes/processlist.test.d.ts +2 -0
- package/dist/mysql/probes/processlist.test.d.ts.map +1 -0
- package/dist/mysql/probes/processlist.test.js +41 -0
- package/dist/mysql/probes/processlist.test.js.map +1 -0
- package/dist/mysql/probes/status.d.ts +14 -0
- package/dist/mysql/probes/status.d.ts.map +1 -0
- package/dist/mysql/probes/status.js +40 -0
- package/dist/mysql/probes/status.js.map +1 -0
- package/dist/mysql/probes/status.test.d.ts +2 -0
- package/dist/mysql/probes/status.test.d.ts.map +1 -0
- package/dist/mysql/probes/status.test.js +43 -0
- package/dist/mysql/probes/status.test.js.map +1 -0
- package/dist/nginx/index.d.ts +3 -0
- package/dist/nginx/index.d.ts.map +1 -0
- package/dist/nginx/index.js +13 -0
- package/dist/nginx/index.js.map +1 -0
- package/dist/nginx/manifest.d.ts +3 -0
- package/dist/nginx/manifest.d.ts.map +1 -0
- package/dist/nginx/manifest.js +68 -0
- package/dist/nginx/manifest.js.map +1 -0
- package/dist/nginx/probes/access-log-tail.d.ts +9 -0
- package/dist/nginx/probes/access-log-tail.d.ts.map +1 -0
- package/dist/nginx/probes/access-log-tail.js +14 -0
- package/dist/nginx/probes/access-log-tail.js.map +1 -0
- package/dist/nginx/probes/access-log-tail.test.d.ts +2 -0
- package/dist/nginx/probes/access-log-tail.test.d.ts.map +1 -0
- package/dist/nginx/probes/access-log-tail.test.js +40 -0
- package/dist/nginx/probes/access-log-tail.test.js.map +1 -0
- package/dist/nginx/probes/config-test.d.ts +8 -0
- package/dist/nginx/probes/config-test.d.ts.map +1 -0
- package/dist/nginx/probes/config-test.js +18 -0
- package/dist/nginx/probes/config-test.js.map +1 -0
- package/dist/nginx/probes/config-test.test.d.ts +2 -0
- package/dist/nginx/probes/config-test.test.d.ts.map +1 -0
- package/dist/nginx/probes/config-test.test.js +35 -0
- package/dist/nginx/probes/config-test.test.js.map +1 -0
- package/dist/nginx/probes/error-log-tail.d.ts +9 -0
- package/dist/nginx/probes/error-log-tail.d.ts.map +1 -0
- package/dist/nginx/probes/error-log-tail.js +14 -0
- package/dist/nginx/probes/error-log-tail.js.map +1 -0
- package/dist/nginx/probes/error-log-tail.test.d.ts +2 -0
- package/dist/nginx/probes/error-log-tail.test.d.ts.map +1 -0
- package/dist/nginx/probes/error-log-tail.test.js +34 -0
- package/dist/nginx/probes/error-log-tail.test.js.map +1 -0
- package/dist/postgres/index.d.ts +3 -0
- package/dist/postgres/index.d.ts.map +1 -0
- package/dist/postgres/index.js +13 -0
- package/dist/postgres/index.js.map +1 -0
- package/dist/postgres/manifest.d.ts +3 -0
- package/dist/postgres/manifest.d.ts.map +1 -0
- package/dist/postgres/manifest.js +90 -0
- package/dist/postgres/manifest.js.map +1 -0
- package/dist/postgres/probes/connections-active.d.ts +17 -0
- package/dist/postgres/probes/connections-active.d.ts.map +1 -0
- package/dist/postgres/probes/connections-active.js +37 -0
- package/dist/postgres/probes/connections-active.js.map +1 -0
- package/dist/postgres/probes/connections-active.test.d.ts +2 -0
- package/dist/postgres/probes/connections-active.test.d.ts.map +1 -0
- package/dist/postgres/probes/connections-active.test.js +36 -0
- package/dist/postgres/probes/connections-active.test.js.map +1 -0
- package/dist/postgres/probes/databases-list.d.ts +14 -0
- package/dist/postgres/probes/databases-list.d.ts.map +1 -0
- package/dist/postgres/probes/databases-list.js +34 -0
- package/dist/postgres/probes/databases-list.js.map +1 -0
- package/dist/postgres/probes/databases-list.test.d.ts +2 -0
- package/dist/postgres/probes/databases-list.test.d.ts.map +1 -0
- package/dist/postgres/probes/databases-list.test.js +49 -0
- package/dist/postgres/probes/databases-list.test.js.map +1 -0
- package/dist/postgres/probes/query-slow.d.ts +17 -0
- package/dist/postgres/probes/query-slow.d.ts.map +1 -0
- package/dist/postgres/probes/query-slow.js +37 -0
- package/dist/postgres/probes/query-slow.js.map +1 -0
- package/dist/postgres/probes/query-slow.test.d.ts +2 -0
- package/dist/postgres/probes/query-slow.test.d.ts.map +1 -0
- package/dist/postgres/probes/query-slow.test.js +30 -0
- package/dist/postgres/probes/query-slow.test.js.map +1 -0
- package/dist/proxmox/index.d.ts +3 -0
- package/dist/proxmox/index.d.ts.map +1 -0
- package/dist/proxmox/index.js +23 -0
- package/dist/proxmox/index.js.map +1 -0
- package/dist/proxmox/manifest.d.ts +3 -0
- package/dist/proxmox/manifest.d.ts.map +1 -0
- package/dist/proxmox/manifest.js +75 -0
- package/dist/proxmox/manifest.js.map +1 -0
- package/dist/proxmox/probes/ceph-status.d.ts +36 -0
- package/dist/proxmox/probes/ceph-status.d.ts.map +1 -0
- package/dist/proxmox/probes/ceph-status.js +71 -0
- package/dist/proxmox/probes/ceph-status.js.map +1 -0
- package/dist/proxmox/probes/ceph-status.test.d.ts +2 -0
- package/dist/proxmox/probes/ceph-status.test.d.ts.map +1 -0
- package/dist/proxmox/probes/ceph-status.test.js +115 -0
- package/dist/proxmox/probes/ceph-status.test.js.map +1 -0
- package/dist/proxmox/probes/cluster-config.d.ts +31 -0
- package/dist/proxmox/probes/cluster-config.d.ts.map +1 -0
- package/dist/proxmox/probes/cluster-config.js +72 -0
- package/dist/proxmox/probes/cluster-config.js.map +1 -0
- package/dist/proxmox/probes/cluster-config.test.d.ts +2 -0
- package/dist/proxmox/probes/cluster-config.test.d.ts.map +1 -0
- package/dist/proxmox/probes/cluster-config.test.js +107 -0
- package/dist/proxmox/probes/cluster-config.test.js.map +1 -0
- package/dist/proxmox/probes/ha-status.d.ts +18 -0
- package/dist/proxmox/probes/ha-status.d.ts.map +1 -0
- package/dist/proxmox/probes/ha-status.js +38 -0
- package/dist/proxmox/probes/ha-status.js.map +1 -0
- package/dist/proxmox/probes/ha-status.test.d.ts +2 -0
- package/dist/proxmox/probes/ha-status.test.d.ts.map +1 -0
- package/dist/proxmox/probes/ha-status.test.js +66 -0
- package/dist/proxmox/probes/ha-status.test.js.map +1 -0
- package/dist/proxmox/probes/lvm.d.ts +35 -0
- package/dist/proxmox/probes/lvm.d.ts.map +1 -0
- package/dist/proxmox/probes/lvm.js +75 -0
- package/dist/proxmox/probes/lvm.js.map +1 -0
- package/dist/proxmox/probes/lvm.test.d.ts +2 -0
- package/dist/proxmox/probes/lvm.test.d.ts.map +1 -0
- package/dist/proxmox/probes/lvm.test.js +128 -0
- package/dist/proxmox/probes/lvm.test.js.map +1 -0
- package/dist/proxmox/probes/lxc-config.d.ts +29 -0
- package/dist/proxmox/probes/lxc-config.d.ts.map +1 -0
- package/dist/proxmox/probes/lxc-config.js +67 -0
- package/dist/proxmox/probes/lxc-config.js.map +1 -0
- package/dist/proxmox/probes/lxc-config.test.d.ts +2 -0
- package/dist/proxmox/probes/lxc-config.test.d.ts.map +1 -0
- package/dist/proxmox/probes/lxc-config.test.js +77 -0
- package/dist/proxmox/probes/lxc-config.test.js.map +1 -0
- package/dist/proxmox/probes/lxc-list.d.ts +20 -0
- package/dist/proxmox/probes/lxc-list.d.ts.map +1 -0
- package/dist/proxmox/probes/lxc-list.js +49 -0
- package/dist/proxmox/probes/lxc-list.js.map +1 -0
- package/dist/proxmox/probes/lxc-list.test.d.ts +2 -0
- package/dist/proxmox/probes/lxc-list.test.d.ts.map +1 -0
- package/dist/proxmox/probes/lxc-list.test.js +51 -0
- package/dist/proxmox/probes/lxc-list.test.js.map +1 -0
- package/dist/proxmox/probes/vm-config.d.ts +21 -0
- package/dist/proxmox/probes/vm-config.d.ts.map +1 -0
- package/dist/proxmox/probes/vm-config.js +58 -0
- package/dist/proxmox/probes/vm-config.js.map +1 -0
- package/dist/proxmox/probes/vm-config.test.d.ts +2 -0
- package/dist/proxmox/probes/vm-config.test.d.ts.map +1 -0
- package/dist/proxmox/probes/vm-config.test.js +80 -0
- package/dist/proxmox/probes/vm-config.test.js.map +1 -0
- package/dist/proxmox/probes/vm-locks.d.ts +16 -0
- package/dist/proxmox/probes/vm-locks.d.ts.map +1 -0
- package/dist/proxmox/probes/vm-locks.js +35 -0
- package/dist/proxmox/probes/vm-locks.js.map +1 -0
- package/dist/proxmox/probes/vm-locks.test.d.ts +2 -0
- package/dist/proxmox/probes/vm-locks.test.d.ts.map +1 -0
- package/dist/proxmox/probes/vm-locks.test.js +54 -0
- package/dist/proxmox/probes/vm-locks.test.js.map +1 -0
- package/dist/redis/index.d.ts +3 -0
- package/dist/redis/index.d.ts.map +1 -0
- package/dist/redis/index.js +13 -0
- package/dist/redis/index.js.map +1 -0
- package/dist/redis/manifest.d.ts +3 -0
- package/dist/redis/manifest.d.ts.map +1 -0
- package/dist/redis/manifest.js +51 -0
- package/dist/redis/manifest.js.map +1 -0
- package/dist/redis/probes/info.d.ts +15 -0
- package/dist/redis/probes/info.d.ts.map +1 -0
- package/dist/redis/probes/info.js +32 -0
- package/dist/redis/probes/info.js.map +1 -0
- package/dist/redis/probes/info.test.d.ts +2 -0
- package/dist/redis/probes/info.test.d.ts.map +1 -0
- package/dist/redis/probes/info.test.js +64 -0
- package/dist/redis/probes/info.test.js.map +1 -0
- package/dist/redis/probes/keys-count.d.ts +13 -0
- package/dist/redis/probes/keys-count.d.ts.map +1 -0
- package/dist/redis/probes/keys-count.js +24 -0
- package/dist/redis/probes/keys-count.js.map +1 -0
- package/dist/redis/probes/keys-count.test.d.ts +2 -0
- package/dist/redis/probes/keys-count.test.d.ts.map +1 -0
- package/dist/redis/probes/keys-count.test.js +37 -0
- package/dist/redis/probes/keys-count.test.js.map +1 -0
- package/dist/redis/probes/memory-usage.d.ts +16 -0
- package/dist/redis/probes/memory-usage.d.ts.map +1 -0
- package/dist/redis/probes/memory-usage.js +31 -0
- package/dist/redis/probes/memory-usage.js.map +1 -0
- package/dist/redis/probes/memory-usage.test.d.ts +2 -0
- package/dist/redis/probes/memory-usage.test.d.ts.map +1 -0
- package/dist/redis/probes/memory-usage.test.js +48 -0
- package/dist/redis/probes/memory-usage.test.js.map +1 -0
- package/dist/runbooks/nutanix.d.ts +3 -0
- package/dist/runbooks/nutanix.d.ts.map +1 -0
- package/dist/runbooks/nutanix.js +619 -0
- package/dist/runbooks/nutanix.js.map +1 -0
- package/dist/runbooks/nutanix.test.d.ts +2 -0
- package/dist/runbooks/nutanix.test.d.ts.map +1 -0
- package/dist/runbooks/nutanix.test.js +971 -0
- package/dist/runbooks/nutanix.test.js.map +1 -0
- package/dist/runbooks/proxmox.d.ts +3 -0
- package/dist/runbooks/proxmox.d.ts.map +1 -0
- package/dist/runbooks/proxmox.js +451 -0
- package/dist/runbooks/proxmox.js.map +1 -0
- package/dist/runbooks/proxmox.test.d.ts +2 -0
- package/dist/runbooks/proxmox.test.d.ts.map +1 -0
- package/dist/runbooks/proxmox.test.js +700 -0
- package/dist/runbooks/proxmox.test.js.map +1 -0
- package/dist/signatures.d.ts +2 -0
- package/dist/signatures.d.ts.map +1 -0
- package/dist/signatures.js +2 -0
- package/dist/signatures.js.map +1 -0
- package/dist/types.d.ts +53 -0
- package/dist/types.d.ts.map +1 -1
- package/dist/validation.d.ts +6 -1
- package/dist/validation.d.ts.map +1 -1
- package/dist/validation.js +10 -1
- package/dist/validation.js.map +1 -1
- package/package.json +1 -1
- package/src/index.ts +60 -6
- package/src/integrations/citrix.test.ts +592 -0
- package/src/integrations/citrix.ts +557 -0
- package/src/integrations/graph.test.ts +478 -0
- package/src/integrations/graph.ts +413 -0
- package/src/integrations/httpbin.ts +72 -0
- package/src/integrations/nutanix.test.ts +1508 -0
- package/src/integrations/nutanix.ts +1460 -0
- package/src/integrations/proxmox.test.ts +1020 -0
- package/src/integrations/proxmox.ts +989 -0
- package/src/integrations/servicenow.test.ts +314 -0
- package/src/integrations/servicenow.ts +285 -0
- package/src/integrations/splunk.test.ts +440 -0
- package/src/integrations/splunk.ts +356 -0
- package/src/mysql/index.ts +14 -0
- package/src/mysql/manifest.ts +70 -0
- package/src/mysql/probes/databases-list.test.ts +62 -0
- package/src/mysql/probes/databases-list.ts +45 -0
- package/src/mysql/probes/processlist.test.ts +47 -0
- package/src/mysql/probes/processlist.ts +55 -0
- package/src/mysql/probes/status.test.ts +50 -0
- package/src/mysql/probes/status.ts +56 -0
- package/src/nginx/index.ts +14 -0
- package/src/nginx/manifest.ts +69 -0
- package/src/nginx/probes/access-log-tail.test.ts +51 -0
- package/src/nginx/probes/access-log-tail.ts +23 -0
- package/src/nginx/probes/config-test.test.ts +47 -0
- package/src/nginx/probes/config-test.ts +24 -0
- package/src/nginx/probes/error-log-tail.test.ts +44 -0
- package/src/nginx/probes/error-log-tail.ts +23 -0
- package/src/postgres/index.ts +14 -0
- package/src/postgres/manifest.ts +91 -0
- package/src/postgres/probes/connections-active.test.ts +42 -0
- package/src/postgres/probes/connections-active.ts +55 -0
- package/src/postgres/probes/databases-list.test.ts +57 -0
- package/src/postgres/probes/databases-list.ts +49 -0
- package/src/postgres/probes/query-slow.test.ts +37 -0
- package/src/postgres/probes/query-slow.ts +55 -0
- package/src/proxmox/index.ts +24 -0
- package/src/proxmox/manifest.ts +76 -0
- package/src/proxmox/probes/ceph-status.test.ts +126 -0
- package/src/proxmox/probes/ceph-status.ts +116 -0
- package/src/proxmox/probes/cluster-config.test.ts +118 -0
- package/src/proxmox/probes/cluster-config.ts +97 -0
- package/src/proxmox/probes/ha-status.test.ts +76 -0
- package/src/proxmox/probes/ha-status.ts +56 -0
- package/src/proxmox/probes/lvm.test.ts +140 -0
- package/src/proxmox/probes/lvm.ts +121 -0
- package/src/proxmox/probes/lxc-config.test.ts +89 -0
- package/src/proxmox/probes/lxc-config.ts +90 -0
- package/src/proxmox/probes/lxc-list.test.ts +60 -0
- package/src/proxmox/probes/lxc-list.ts +67 -0
- package/src/proxmox/probes/vm-config.test.ts +93 -0
- package/src/proxmox/probes/vm-config.ts +77 -0
- package/src/proxmox/probes/vm-locks.test.ts +63 -0
- package/src/proxmox/probes/vm-locks.ts +49 -0
- package/src/redis/index.ts +14 -0
- package/src/redis/manifest.ts +52 -0
- package/src/redis/probes/info.test.ts +73 -0
- package/src/redis/probes/info.ts +46 -0
- package/src/redis/probes/keys-count.test.ts +44 -0
- package/src/redis/probes/keys-count.ts +38 -0
- package/src/redis/probes/memory-usage.test.ts +54 -0
- package/src/redis/probes/memory-usage.ts +46 -0
- package/src/runbooks/nutanix.test.ts +1138 -0
- package/src/runbooks/nutanix.ts +941 -0
- package/src/runbooks/proxmox.test.ts +838 -0
- package/src/runbooks/proxmox.ts +626 -0
- package/src/signatures.ts +1 -0
- package/src/types.ts +62 -0
- package/src/validation.ts +21 -1
- package/tsconfig.tsbuildinfo +1 -0
|
@@ -0,0 +1,733 @@
|
|
|
1
|
+
// --- Auth helpers ---
/**
 * Build the Proxmox `Authorization` header from an API-token credential set.
 * Missing token parts fall back to empty strings rather than throwing.
 */
export function buildAuthHeaders(credentials) {
  const { tokenId, tokenSecret } = credentials.credentials;
  const headerValue = `PVEAPIToken=${tokenId ?? ''}=${tokenSecret ?? ''}`;
  return { Authorization: headerValue };
}
|
|
8
|
+
// --- Proxmox REST helper ---
/**
 * Compose a full Proxmox API URL: `<endpoint>/api2/json<path>` plus optional
 * query parameters. A single trailing slash on the endpoint is stripped so
 * the path never doubles up.
 */
function proxmoxUrl(endpoint, path, params) {
  const trimmed = endpoint.replace(/\/$/, '');
  const url = new URL(`${trimmed}/api2/json${path}`);
  for (const [key, value] of Object.entries(params ?? {})) {
    url.searchParams.set(key, value);
  }
  return url.toString();
}
|
|
19
|
+
/**
 * GET a Proxmox API endpoint and return the parsed JSON body.
 * Auth headers come from the credentials; `config.headers` may override any
 * default header. Throws on a non-2xx response.
 */
export async function proxmoxGet(path, config, credentials, fetchFn, params) {
  const requestHeaders = {
    Accept: 'application/json',
    ...buildAuthHeaders(credentials),
    ...config.headers,
  };
  const response = await fetchFn(proxmoxUrl(config.endpoint, path, params), {
    headers: requestHeaders,
  });
  if (!response.ok) {
    throw new Error(`Proxmox API returned ${response.status}: ${response.statusText}`);
  }
  return response.json();
}
|
|
32
|
+
// --- Node resolver ---
/**
 * Look up which cluster node hosts a given VM/container by scanning
 * `/cluster/resources?type=vm`. Throws when the vmid is unknown or the
 * matching entry has no node attached.
 */
export async function resolveNode(vmid, config, credentials, fetchFn) {
  const resources = await proxmoxGet('/cluster/resources', config, credentials, fetchFn, {
    type: 'vm',
  });
  const match = (resources.data ?? []).find((entry) => entry.vmid === vmid);
  if (!match?.node) {
    throw new Error(`VM/container ${vmid} not found in cluster`);
  }
  return { node: match.node, type: match.type };
}
|
|
44
|
+
// --- Probe handlers ---
/**
 * Probe: cluster-wide health from `/cluster/status`.
 * Reports cluster name, quorum, and per-node online state; warns on lost
 * quorum and on each offline node.
 */
const clusterStatus = async (_params, config, credentials, fetchFn) => {
  const data = await proxmoxGet('/cluster/status', config, credentials, fetchFn);
  const entries = data.data ?? [];
  const cluster = entries.find((entry) => entry.type === 'cluster');
  const nodes = entries.filter((entry) => entry.type === 'node');

  const warnings = [];
  if (cluster && !cluster.quorate) {
    warnings.push('Cluster has lost quorum');
  }
  warnings.push(
    ...nodes.filter((n) => !n.online).map((n) => `Node ${n.name} is offline`),
  );

  return {
    clusterName: cluster?.name ?? null,
    quorate: !!cluster?.quorate,
    nodes: nodes.map((n) => ({
      name: n.name,
      online: !!n.online,
      ip: n.ip ?? null,
      type: n.type,
    })),
    warnings,
  };
};
|
|
71
|
+
/**
 * Probe: HA manager state and managed resources, fetched in parallel from
 * `/cluster/ha/status/current` and `/cluster/ha/resources`.
 * Warns on any resource in `error` or `fence` state.
 */
const clusterHaStatus = async (_params, config, credentials, fetchFn) => {
  const [statusData, resourcesData] = await Promise.all([
    proxmoxGet('/cluster/ha/status/current', config, credentials, fetchFn),
    proxmoxGet('/cluster/ha/resources', config, credentials, fetchFn),
  ]);
  // The manager entry may be keyed by type or by id, depending on PVE version.
  const manager = (statusData.data ?? []).find(
    (entry) => entry.type === 'manager' || entry.id === 'manager',
  );
  const haResources = resourcesData.data ?? [];
  const warnings = haResources
    .filter((res) => res.state === 'error' || res.state === 'fence')
    .map((res) => `HA resource ${res.sid} in ${res.state} state`);

  return {
    managerStatus: manager?.status ?? manager?.state ?? null,
    resources: haResources.map((res) => ({
      sid: res.sid,
      state: res.state,
      node: res.node,
      type: res.type,
      group: res.group ?? null,
    })),
    warnings,
  };
};
|
|
97
|
+
/**
 * Probe: list cluster nodes from `/nodes` with basic utilisation figures.
 * Warns when a node is offline, CPU usage exceeds 90%, or memory usage
 * exceeds 90% of maxmem.
 */
const nodesList = async (_params, config, credentials, fetchFn) => {
  const data = await proxmoxGet('/nodes', config, credentials, fetchFn);
  const rawNodes = data.data ?? [];

  const warnings = [];
  for (const entry of rawNodes) {
    if (entry.status === 'offline') {
      warnings.push(`Node ${entry.node} is offline`);
    }
    if (entry.cpu != null && entry.cpu > 0.9) {
      warnings.push(`Node ${entry.node} CPU at ${Math.round(entry.cpu * 100)}%`);
    }
    const memKnown = entry.mem != null && entry.maxmem != null && entry.maxmem > 0;
    if (memKnown && entry.mem / entry.maxmem > 0.9) {
      warnings.push(`Node ${entry.node} memory at ${Math.round((entry.mem / entry.maxmem) * 100)}%`);
    }
  }

  return {
    nodes: rawNodes.map((entry) => ({
      node: entry.node,
      status: entry.status,
      uptime: entry.uptime ?? 0,
      cpu: entry.cpu ?? 0,
      maxcpu: entry.maxcpu ?? 0,
      mem: entry.mem ?? 0,
      maxmem: entry.maxmem ?? 0,
      loadavg: entry.loadavg ?? null,
    })),
    warnings,
  };
};
|
|
126
|
+
/**
 * Probe: storage pools on one node via `/nodes/{node}/storage`.
 * Warns when a pool is over 85% used, disabled, or enabled-but-inactive.
 * Pools missing `total`/`used` are skipped by the usage check (truthy guard:
 * a 0 value also skips, which is harmless — 0% usage never warns).
 */
const nodeStorage = async (params, config, credentials, fetchFn) => {
  const node = params?.node;
  if (!node) {
    throw new Error('node parameter is required');
  }
  const data = await proxmoxGet(`/nodes/${node}/storage`, config, credentials, fetchFn);
  const pools = data.data ?? [];

  const warnings = [];
  for (const pool of pools) {
    if (pool.total && pool.used && pool.total > 0 && pool.used / pool.total > 0.85) {
      warnings.push(`Storage ${pool.storage} is ${Math.round((pool.used / pool.total) * 100)}% used`);
    }
    if (pool.enabled === 0) {
      warnings.push(`Storage ${pool.storage} is disabled`);
    }
    if (pool.active === 0 && pool.enabled !== 0) {
      warnings.push(`Storage ${pool.storage} is inactive`);
    }
  }

  return {
    storages: pools.map((pool) => ({
      storage: pool.storage,
      type: pool.type,
      total: pool.total ?? 0,
      used: pool.used ?? 0,
      avail: pool.avail ?? 0,
      shared: !!pool.shared,
      enabled: pool.enabled !== 0,
      active: pool.active !== 0,
    })),
    warnings,
  };
};
|
|
158
|
+
/**
 * Probe: status of a single VM/container looked up via `/cluster/resources`.
 * Warns when the guest is stopped, holds a lock, or reports an HA state
 * other than managed/started.
 */
const vmStatus = async (params, config, credentials, fetchFn) => {
  const vmid = params?.vmid;
  if (vmid == null) {
    throw new Error('vmid parameter is required');
  }
  const resources = await proxmoxGet('/cluster/resources', config, credentials, fetchFn, {
    type: 'vm',
  });
  const guest = (resources.data ?? []).find((entry) => entry.vmid === vmid);
  if (!guest) {
    throw new Error(`VM/container ${vmid} not found in cluster`);
  }

  const warnings = [];
  if (guest.status === 'stopped') {
    warnings.push('VM is stopped');
  }
  if (guest.lock) {
    warnings.push(`VM has lock: ${guest.lock}`);
  }
  const haHealthy = !guest.hastate || guest.hastate === 'managed' || guest.hastate === 'started';
  if (!haHealthy) {
    warnings.push(`HA state: ${guest.hastate}`);
  }

  return {
    vmid: guest.vmid,
    name: guest.name,
    status: guest.status,
    node: guest.node,
    type: guest.type,
    uptime: guest.uptime ?? 0,
    cpu: guest.cpu ?? 0,
    mem: guest.mem ?? 0,
    maxmem: guest.maxmem ?? 0,
    lock: guest.lock ?? null,
    hastate: guest.hastate ?? null,
    warnings,
  };
};
|
|
193
|
+
/**
 * Probe: fetch a VM's QEMU config and summarise its disks.
 * Resolves the hosting node via the cluster when not supplied. Disk entries
 * are recognised by key prefix (scsi/ide/virtio/sata/efidisk/tpmstate) and
 * parsed from the "storage:volume,opt=...,..." value form. Warns for disks on
 * local (non-shared) storage, which blocks HA failover.
 */
const vmConfig = async (params, config, credentials, fetchFn) => {
  const vmid = params?.vmid;
  if (vmid == null) {
    throw new Error('vmid parameter is required');
  }
  let node = params?.node;
  if (!node) {
    ({ node } = await resolveNode(vmid, config, credentials, fetchFn));
  }
  const data = await proxmoxGet(`/nodes/${node}/qemu/${vmid}/config`, config, credentials, fetchFn);
  const cfg = data.data ?? {};

  const diskPrefixes = ['scsi', 'ide', 'virtio', 'sata', 'efidisk', 'tpmstate'];
  // Format precedence: volume-name suffix first, then an explicit format= option;
  // everything else is treated as raw.
  const detectFormat = (volume, opts) => {
    if (volume.endsWith('.qcow2')) return 'qcow2';
    if (volume.endsWith('.vmdk')) return 'vmdk';
    if (opts.includes('format=qcow2')) return 'qcow2';
    if (opts.includes('format=vmdk')) return 'vmdk';
    return 'raw';
  };

  const disks = [];
  for (const [key, value] of Object.entries(cfg)) {
    if (typeof value !== 'string') continue;
    if (!diskPrefixes.some((prefix) => key.startsWith(prefix))) continue;
    // Value forms: "local-lvm:vm-100-disk-0,size=32G" or "local:iso/file.iso,media=cdrom"
    const colonIdx = value.indexOf(':');
    if (colonIdx === -1) continue;
    const storage = value.slice(0, colonIdx);
    const rest = value.slice(colonIdx + 1);
    const commaIdx = rest.indexOf(',');
    const volume = commaIdx > -1 ? rest.slice(0, commaIdx) : rest;
    const opts = commaIdx > -1 ? rest.slice(commaIdx + 1) : '';
    const size = opts.match(/size=(\S+)/)?.[1] ?? '';
    disks.push({ key, storage, format: detectFormat(volume, opts), size });
  }

  // Local storage cannot be shared across nodes, so HA-managed guests on it
  // cannot fail over cleanly.
  const warnings = disks
    .filter((d) => d.storage === 'local' || d.storage === 'local-lvm')
    .map((d) => `Disk ${d.key} uses local storage (${d.storage}) — not shared for HA`);

  return { vmid, node, config: cfg, disks, warnings };
};
|
|
253
|
+
/**
 * Probe: list QEMU snapshots for a VM (excluding the synthetic "current"
 * entry). Warns about snapshots older than seven days.
 */
const vmSnapshots = async (params, config, credentials, fetchFn) => {
  const vmid = params?.vmid;
  if (vmid == null) {
    throw new Error('vmid parameter is required');
  }
  let node = params?.node;
  if (!node) {
    ({ node } = await resolveNode(vmid, config, credentials, fetchFn));
  }
  const data = await proxmoxGet(`/nodes/${node}/qemu/${vmid}/snapshot`, config, credentials, fetchFn);
  const snapshots = (data.data ?? []).filter((snap) => snap.name !== 'current');

  const nowSec = Date.now() / 1000;
  const maxAgeSec = 7 * 24 * 60 * 60;
  const warnings = snapshots
    .filter((snap) => snap.snaptime && nowSec - snap.snaptime > maxAgeSec)
    .map((snap) => `Snapshot "${snap.name}" is older than 7 days`);

  return {
    vmid,
    node,
    snapshots: snapshots.map((snap) => ({
      name: snap.name,
      description: snap.description ?? '',
      snaptime: snap.snaptime ?? null,
      parent: snap.parent ?? null,
    })),
    warnings,
  };
};
|
|
284
|
+
/**
 * Probe: list the contents of one storage on a node, optionally filtered to
 * the volumes owned by a single vmid.
 */
const storageContent = async (params, config, credentials, fetchFn) => {
  const node = params?.node;
  const storage = params?.storage;
  if (!node) {
    throw new Error('node parameter is required');
  }
  if (!storage) {
    throw new Error('storage parameter is required');
  }
  const wantedVmid = params?.vmid;
  const data = await proxmoxGet(`/nodes/${node}/storage/${storage}/content`, config, credentials, fetchFn);
  const volumes = (data.data ?? []).filter(
    (vol) => wantedVmid == null || vol.vmid === wantedVmid,
  );
  return {
    volumes: volumes.map((vol) => ({
      volid: vol.volid,
      vmid: vol.vmid ?? null,
      size: vol.size ?? 0,
      format: vol.format ?? null,
    })),
    count: volumes.length,
  };
};
|
|
307
|
+
/**
 * Probe: recent cluster task history from `/cluster/tasks`.
 *
 * Params:
 *   vmid  (optional) — keep only tasks whose id matches this vmid
 *   limit (optional) — maximum number of tasks returned (default 50)
 *
 * Warns on finished tasks whose status is not OK, and on in-flight
 * migrations (`qmigrate` without an endtime).
 */
const clusterTasks = async (params, config, credentials, fetchFn) => {
  const vmidFilter = params?.vmid;
  // `??` instead of `||` so an explicit limit of 0 is honoured rather than
  // being silently replaced by the default.
  const limit = params?.limit ?? 50;
  const data = await proxmoxGet('/cluster/tasks', config, credentials, fetchFn);
  let tasks = data.data ?? [];
  if (vmidFilter != null) {
    // Task ids are strings in the API even when the vmid is numeric.
    const vmidStr = String(vmidFilter);
    tasks = tasks.filter((t) => t.id === vmidStr);
  }
  tasks = tasks.slice(0, limit);
  const warnings = [];
  for (const t of tasks) {
    // A truthy status already excludes '', so no separate empty-string check
    // is needed; a non-OK status on a finished task marks a failure.
    if (t.status && t.status !== 'OK' && t.endtime) {
      warnings.push(`Task ${t.type} on ${t.node} failed: ${t.status}`);
    }
    if (t.type === 'qmigrate' && !t.endtime) {
      warnings.push(`Migration in progress on ${t.node}`);
    }
  }
  return {
    tasks: tasks.map((t) => ({
      upid: t.upid,
      type: t.type,
      status: t.status ?? null,
      starttime: t.starttime ?? null,
      endtime: t.endtime ?? null,
      node: t.node,
      user: t.user ?? null,
    })),
    warnings,
  };
};
|
|
339
|
+
/**
 * LVM volume groups on a node, warning about any group with zero free bytes.
 *
 * Params: `node` (required). Returns `{ volumeGroups, warnings }` with
 * numeric fields defaulted to 0. Throws if `node` is missing.
 */
const nodeLvm = async (params, config, credentials, fetchFn) => {
    const node = params?.node;
    if (!node)
        throw new Error('node parameter is required');
    const data = await proxmoxGet(`/nodes/${node}/disks/lvm`, config, credentials, fetchFn);
    const groups = data.data ?? [];
    // Strict `=== 0` already excludes null/undefined, matching the original
    // `!= null && === 0` guard.
    const warnings = groups
        .filter((vg) => vg.free === 0)
        .map((vg) => `Volume group ${vg.name} has no free space`);
    const volumeGroups = groups.map(({ name, size, free, pvs, lvs }) => ({
        name,
        size: size ?? 0,
        free: free ?? 0,
        pvs: pvs ?? 0,
        lvs: lvs ?? 0,
    }));
    return { volumeGroups, warnings };
};
|
|
362
|
+
/**
 * Current status of an LXC container (CPU, memory, disk, swap, uptime).
 *
 * Params: `vmid` (required), `node` (optional — resolved cluster-wide when
 * omitted, in which case the VMID is also verified to be an LXC container).
 * Returns a flat status record plus a `warnings` array; warns when the
 * container is stopped. Throws if `vmid` is missing or (when resolving) the
 * VMID is not an LXC container.
 */
const lxcStatus = async (params, config, credentials, fetchFn) => {
    const vmid = params?.vmid;
    if (vmid == null)
        throw new Error('vmid parameter is required');
    let node = params?.node;
    if (!node) {
        const located = await resolveNode(vmid, config, credentials, fetchFn);
        if (located.type !== 'lxc') {
            throw new Error(`VMID ${vmid} is not an LXC container (type: ${located.type})`);
        }
        node = located.node;
    }
    const response = await proxmoxGet(`/nodes/${node}/lxc/${vmid}/status/current`, config, credentials, fetchFn);
    const current = response.data ?? {};
    const warnings = current.status === 'stopped' ? ['Container is stopped'] : [];
    return {
        vmid: current.vmid ?? vmid,
        name: current.name ?? null,
        status: current.status ?? null,
        node,
        uptime: current.uptime ?? 0,
        cpu: current.cpu ?? 0,
        mem: current.mem ?? 0,
        maxmem: current.maxmem ?? 0,
        disk: current.disk ?? 0,
        maxdisk: current.maxdisk ?? 0,
        swap: current.swap ?? 0,
        maxswap: current.maxswap ?? 0,
        warnings,
    };
};
|
|
396
|
+
/**
 * LXC container configuration with the root filesystem and extra
 * mountpoints parsed out of Proxmox's "storage:volume,opt=..." strings.
 *
 * Params: `vmid` (required), `node` (optional — auto-resolved when omitted).
 * Returns `{ vmid, node, config, rootfs, mountpoints, warnings }` where
 * `rootfs` is `{ storage, size }` or null, and each mountpoint carries
 * `{ key, storage, mountpoint, size }`. Throws if `vmid` is missing.
 */
const lxcConfig = async (params, config, credentials, fetchFn) => {
    const vmid = params?.vmid;
    if (vmid == null)
        throw new Error('vmid parameter is required');
    let node = params?.node;
    if (!node) {
        node = (await resolveNode(vmid, config, credentials, fetchFn)).node;
    }
    const response = await proxmoxGet(`/nodes/${node}/lxc/${vmid}/config`, config, credentials, fetchFn);
    const cfg = response.data ?? {};
    // Splits "storage:volume,opt=..." at the first colon; null when there is
    // no colon (entries without one are skipped, as before).
    const splitVolume = (raw) => {
        const sep = raw.indexOf(':');
        return sep === -1 ? null : { storage: raw.slice(0, sep), rest: raw.slice(sep + 1) };
    };
    // Root filesystem, e.g. "local-lvm:subvol-200-disk-0,size=8G".
    let rootfs = null;
    if (typeof cfg.rootfs === 'string') {
        const parts = splitVolume(cfg.rootfs);
        if (parts) {
            rootfs = { storage: parts.storage, size: parts.rest.match(/size=(\S+)/)?.[1] ?? '' };
        }
    }
    // Additional mountpoints: keys mp0, mp1, ... with string values.
    const mountpoints = [];
    for (const [key, value] of Object.entries(cfg)) {
        if (!/^mp\d+$/.test(key) || typeof value !== 'string')
            continue;
        const parts = splitVolume(value);
        if (!parts)
            continue;
        mountpoints.push({
            key,
            storage: parts.storage,
            mountpoint: parts.rest.match(/mp=([^,]+)/)?.[1] ?? '',
            size: parts.rest.match(/size=(\S+)/)?.[1] ?? '',
        });
    }
    return {
        vmid,
        node,
        config: cfg,
        rootfs,
        mountpoints,
        warnings: [],
    };
};
|
|
447
|
+
/**
 * Cluster-wide inventory of VMs and containers (queried with `type: 'vm'`,
 * which in the Proxmox resources endpoint covers both qemu and lxc entries —
 * TODO confirm against API docs).
 *
 * Returns `{ resources }` with one flat record per guest; optional fields
 * default to null (name, hastate, lock) or 0 (uptime, cpu, mem, maxmem).
 */
const clusterResources = async (_params, config, credentials, fetchFn) => {
    const data = await proxmoxGet('/cluster/resources', config, credentials, fetchFn, {
        type: 'vm',
    });
    const entries = data.data ?? [];
    const resources = entries.map((entry) => ({
        vmid: entry.vmid,
        name: entry.name ?? null,
        node: entry.node,
        type: entry.type,
        status: entry.status,
        uptime: entry.uptime ?? 0,
        cpu: entry.cpu ?? 0,
        mem: entry.mem ?? 0,
        maxmem: entry.maxmem ?? 0,
        hastate: entry.hastate ?? null,
        lock: entry.lock ?? null,
    }));
    return { resources };
};
|
|
468
|
+
/**
 * Ceph cluster health summary: overall health, OSD counts, PG states,
 * raw-capacity usage, and (best-effort) per-OSD up/down detail.
 *
 * When the status endpoint answers with a 404/501 (detected by substring
 * match on the error message), Ceph is reported as unavailable rather than
 * raising; any other error is rethrown.
 */
const cephStatus = async (_params, config, credentials, fetchFn) => {
    // Per-OSD detail is best-effort: query the first online node's OSD list;
    // any failure along the way simply yields an empty list.
    const fetchOsdDetails = async () => {
        try {
            const nodesData = await proxmoxGet('/nodes', config, credentials, fetchFn);
            const online = (nodesData.data ?? []).find((n) => n.status === 'online');
            if (!online?.node)
                return [];
            const osdData = await proxmoxGet(`/nodes/${online.node}/ceph/osd`, config, credentials, fetchFn);
            return (osdData.data ?? []).map((o) => ({
                id: o.id ?? 0,
                name: o.name ?? `osd.${o.id ?? 0}`,
                status: o.up ? 'up' : 'down',
            }));
        }
        catch {
            return [];
        }
    };
    let raw;
    try {
        raw = await proxmoxGet('/cluster/ceph/status', config, credentials, fetchFn);
    }
    catch (err) {
        const msg = err instanceof Error ? err.message : String(err);
        if (msg.includes('404') || msg.includes('501')) {
            return {
                available: false,
                health: null,
                warnings: ['Ceph is not configured on this cluster'],
            };
        }
        throw err;
    }
    const ceph = raw.data ?? {};
    const osdmap = ceph.osdmap?.osdmap ?? {};
    const pgmap = ceph.pgmap ?? {};
    const health = ceph.health?.status ?? 'unknown';
    const warnings = [];
    if (health !== 'HEALTH_OK') {
        warnings.push(`Ceph health: ${health}`);
    }
    const osdCount = osdmap.num_osds ?? 0;
    const osdUp = osdmap.num_up_osds ?? 0;
    if (osdCount > 0 && osdUp < osdCount) {
        warnings.push(`${osdCount - osdUp} OSD(s) down`);
    }
    const osds = await fetchOsdDetails();
    return {
        available: true,
        health,
        osdCount,
        osdUp,
        osdIn: osdmap.num_in_osds ?? 0,
        pgStates: (pgmap.pgs_by_state ?? []).map((p) => ({
            state: p.state_name,
            count: p.count,
        })),
        usage: {
            total: pgmap.bytes_total ?? 0,
            used: pgmap.bytes_used ?? 0,
            avail: pgmap.bytes_avail ?? 0,
        },
        osds,
        warnings,
    };
};
|
|
534
|
+
// --- Pack definition ---
// Shared probe-parameter descriptors. They are spread (`{ ...X }`) at every
// use site so each probe still owns its own distinct params object, exactly
// as the previously hand-expanded literals did.
const PARAM_NODE_REQUIRED = { type: 'string', description: 'Node name', required: true };
const PARAM_NODE_OPTIONAL = {
    type: 'string',
    description: 'Node name (auto-resolved if omitted)',
    required: false,
};
const PARAM_VMID_REQUIRED = { type: 'number', description: 'VM ID', required: true };
const PARAM_CT_VMID_REQUIRED = { type: 'number', description: 'Container VMID', required: true };
const PARAM_VMID_FILTER = { type: 'number', description: 'Filter by VM ID', required: false };
/**
 * Proxmox VE integration pack: the manifest (probe catalogue plus default
 * runbook), the handler map wiring each probe name to its implementation,
 * and a connectivity check against the API's /version endpoint.
 */
export const proxmoxPack = {
    manifest: {
        name: 'proxmox',
        type: 'integration',
        version: '0.1.0',
        description: 'Proxmox VE cluster — nodes, VMs, containers, storage, Ceph, and HA status',
        requires: { groups: [], files: [], commands: [] },
        probes: [
            {
                name: 'cluster.status',
                description: 'Cluster status including quorum and node health',
                capability: 'observe',
                params: {},
                timeout: 15000,
            },
            {
                name: 'cluster.ha.status',
                description: 'HA manager status and resource states',
                capability: 'observe',
                params: {},
                timeout: 15000,
            },
            {
                name: 'nodes.list',
                description: 'List all nodes with CPU, memory, and status',
                capability: 'observe',
                params: {},
                timeout: 15000,
            },
            {
                name: 'node.storage',
                description: 'Storage pools for a specific node',
                capability: 'observe',
                params: {
                    node: { ...PARAM_NODE_REQUIRED },
                },
                timeout: 15000,
            },
            {
                name: 'vm.status',
                description: 'VM status by VMID (searches cluster-wide)',
                capability: 'observe',
                params: {
                    vmid: { ...PARAM_VMID_REQUIRED },
                },
                timeout: 15000,
            },
            {
                name: 'vm.config',
                description: 'VM configuration with parsed disk entries',
                capability: 'observe',
                params: {
                    vmid: { ...PARAM_VMID_REQUIRED },
                    node: { ...PARAM_NODE_OPTIONAL },
                },
                timeout: 15000,
            },
            {
                name: 'vm.snapshots',
                description: 'VM snapshots with age warnings',
                capability: 'observe',
                params: {
                    vmid: { ...PARAM_VMID_REQUIRED },
                    node: { ...PARAM_NODE_OPTIONAL },
                },
                timeout: 15000,
            },
            {
                name: 'storage.content',
                description: 'List volumes in a storage pool',
                capability: 'observe',
                params: {
                    node: { ...PARAM_NODE_REQUIRED },
                    storage: { type: 'string', description: 'Storage ID', required: true },
                    vmid: { ...PARAM_VMID_FILTER },
                },
                timeout: 15000,
            },
            {
                name: 'cluster.tasks',
                description: 'Recent cluster tasks with failure detection',
                capability: 'observe',
                params: {
                    vmid: { ...PARAM_VMID_FILTER },
                    limit: {
                        type: 'number',
                        description: 'Max results (default: 50)',
                        required: false,
                    },
                },
                timeout: 15000,
            },
            {
                name: 'node.lvm',
                description: 'LVM volume groups on a node',
                capability: 'observe',
                params: {
                    node: { ...PARAM_NODE_REQUIRED },
                },
                timeout: 15000,
            },
            {
                name: 'lxc.status',
                description: 'LXC container status with resource usage',
                capability: 'observe',
                params: {
                    vmid: { ...PARAM_CT_VMID_REQUIRED },
                    node: { ...PARAM_NODE_OPTIONAL },
                },
                timeout: 15000,
            },
            {
                name: 'lxc.config',
                description: 'LXC container configuration with parsed mountpoints',
                capability: 'observe',
                params: {
                    vmid: { ...PARAM_CT_VMID_REQUIRED },
                    node: { ...PARAM_NODE_OPTIONAL },
                },
                timeout: 15000,
            },
            {
                // Ceph status can be slow on busy clusters; allow a longer timeout.
                name: 'ceph.status',
                description: 'Ceph cluster health, OSD status, and usage',
                capability: 'observe',
                params: {},
                timeout: 30000,
            },
            {
                name: 'cluster.resources',
                description: 'List all VMs and containers across the cluster',
                capability: 'observe',
                params: {},
                timeout: 15000,
            },
        ],
        runbook: {
            category: 'virtualization',
            probes: ['cluster.status', 'nodes.list', 'ceph.status'],
            parallel: true,
        },
    },
    handlers: {
        'cluster.status': clusterStatus,
        'cluster.ha.status': clusterHaStatus,
        'nodes.list': nodesList,
        'node.storage': nodeStorage,
        'vm.status': vmStatus,
        'vm.config': vmConfig,
        'vm.snapshots': vmSnapshots,
        'storage.content': storageContent,
        'cluster.tasks': clusterTasks,
        'node.lvm': nodeLvm,
        'lxc.status': lxcStatus,
        'lxc.config': lxcConfig,
        'ceph.status': cephStatus,
        'cluster.resources': clusterResources,
    },
    // Connectivity probe: GET /version with auth headers applied; any HTTP
    // or network failure is reported as `false` rather than thrown.
    testConnection: async (config, credentials, fetchFn) => {
        try {
            const url = proxmoxUrl(config.endpoint, '/version');
            const headers = {
                Accept: 'application/json',
                ...buildAuthHeaders(credentials),
                ...config.headers,
            };
            const res = await fetchFn(url, { headers });
            return res.ok;
        }
        catch {
            return false;
        }
    },
};
|
|
733
|
+
//# sourceMappingURL=proxmox.js.map
|