sbcli-pre 1.2.5__zip → 1.2.7__zip
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/PKG-INFO +1 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/env_var +1 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/sbcli_pre.egg-info/PKG-INFO +1 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/sbcli_pre.egg-info/SOURCES.txt +5 -3
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_cli/cli.py +138 -136
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/cluster_ops.py +138 -235
- sbcli_pre-1.2.7/simplyblock_core/constants.py +91 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/controllers/caching_node_controller.py +8 -6
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/controllers/cluster_events.py +9 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/controllers/device_controller.py +56 -63
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/controllers/events_controller.py +5 -3
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/controllers/health_controller.py +30 -40
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/controllers/lvol_controller.py +75 -39
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/controllers/pool_controller.py +8 -4
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/controllers/snapshot_controller.py +36 -3
- sbcli_pre-1.2.7/simplyblock_core/controllers/tasks_controller.py +103 -0
- sbcli_pre-1.2.7/simplyblock_core/controllers/tasks_events.py +37 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/distr_controller.py +13 -9
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/kv_store.py +62 -20
- sbcli_pre-1.2.7/simplyblock_core/mgmt_node_ops.py +205 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/models/events.py +9 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/models/job_schedule.py +6 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/models/nvme_device.py +42 -4
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/models/storage_node.py +14 -2
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/rpc_client.py +55 -10
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/__init__.py +0 -4
- sbcli_pre-1.2.5/simplyblock_core/scripts/alerting/alert_resources.yaml → sbcli_pre-1.2.7/simplyblock_core/scripts/alerting/alert_resources.yaml.j2 +54 -5
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/dashboards/cluster.json +1 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/deploy_stack.sh +9 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/docker-compose-swarm-monitoring.yml +32 -15
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/docker-compose-swarm.yml +17 -2
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/haproxy.cfg +15 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/install_deps.sh +3 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/stack_deploy_wait.sh +1 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/services/capacity_and_stats_collector.py +1 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/services/device_monitor.py +5 -46
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/services/distr_event_collector.py +10 -11
- sbcli_pre-1.2.7/simplyblock_core/services/health_check_service.py +134 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/services/lvol_monitor.py +1 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/services/lvol_stat_collector.py +1 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/services/port_stat_collector.py +0 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/services/storage_node_monitor.py +49 -44
- sbcli_pre-1.2.7/simplyblock_core/services/tasks_runner_migration.py +61 -0
- sbcli_pre-1.2.5/simplyblock_core/services/job_tasks.py → sbcli_pre-1.2.7/simplyblock_core/services/tasks_runner_restart.py +95 -46
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/snode_client.py +12 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/storage_node_ops.py +630 -358
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/utils.py +126 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/blueprints/snode_ops.py +103 -25
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/blueprints/web_api_cluster.py +20 -43
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/blueprints/web_api_device.py +10 -7
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/blueprints/web_api_lvol.py +9 -5
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/blueprints/web_api_pool.py +14 -5
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/blueprints/web_api_storage_node.py +15 -15
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/node_utils.py +0 -2
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/utils.py +8 -0
- sbcli_pre-1.2.5/simplyblock_core/constants.py +0 -65
- sbcli_pre-1.2.5/simplyblock_core/mgmt_node_ops.py +0 -80
- sbcli_pre-1.2.5/simplyblock_core/scripts/apply_dashboard.sh +0 -22
- sbcli_pre-1.2.5/simplyblock_core/services/health_check_service.py +0 -136
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/README.md +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/pyproject.toml +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/sbcli_pre.egg-info/dependency_links.txt +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/sbcli_pre.egg-info/entry_points.txt +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/sbcli_pre.egg-info/requires.txt +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/sbcli_pre.egg-info/top_level.txt +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/setup.cfg +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/setup.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_cli/main.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/__init__.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/cnode_client.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/compute_node_ops.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/controllers/__init__.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/controllers/device_events.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/controllers/lvol_events.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/controllers/mgmt_events.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/controllers/pool_events.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/controllers/snapshot_events.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/controllers/storage_events.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/models/__init__.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/models/base_model.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/models/caching_node.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/models/cluster.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/models/compute_node.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/models/deployer.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/models/global_settings.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/models/iface.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/models/lvol_model.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/models/mgmt_node.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/models/pool.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/models/port_stat.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/models/snapshot.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/models/stats.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/pci_utils.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/alerting/alert_rules.yaml +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/clean_local_storage_deploy.sh +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/config_docker.sh +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/dashboards/devices.json +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/dashboards/lvols.json +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/dashboards/node-exporter.json +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/dashboards/nodes.json +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/dashboards/pools.json +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/datasource.yml +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/db_config_double.sh +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/db_config_single.sh +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/prometheus.yml +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/run_ssh.sh +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/scripts/set_db_config.sh +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/services/__init__.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/services/caching_node_monitor.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/services/cap_monitor.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/services/install_service.sh +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/services/log_agg_service.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/services/mgmt_node_monitor.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/services/remove_service.sh +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/services/service_template.service +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_core/shell_utils.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/__init__.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/app.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/auth_middleware.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/blueprints/__init__.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/blueprints/caching_node_ops.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/blueprints/caching_node_ops_k8s.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/blueprints/node_api_basic.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/blueprints/node_api_caching_docker.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/blueprints/node_api_caching_ks.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/blueprints/web_api_caching_node.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/blueprints/web_api_deployer.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/blueprints/web_api_mgmt_node.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/blueprints/web_api_snapshot.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/caching_node_app.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/caching_node_app_k8s.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/node_webapp.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/snode_app.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/static/delete.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/static/deploy.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/static/deploy_cnode.yaml +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/static/deploy_spdk.yaml +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/static/is_up.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/static/list_deps.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/static/rpac.yaml +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/static/tst.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_web/templates/deploy_spdk.yaml.j2 +0 -0
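The hunks below show the changes in {sbcli_pre-1.2.5 → sbcli_pre-1.2.7}/simplyblock_cli/cli.py.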
@@ -10,7 +10,8 @@ from simplyblock_core import compute_node_ops as compute_ops
 from simplyblock_core import storage_node_ops as storage_ops
 from simplyblock_core import mgmt_node_ops as mgmt_ops
 from simplyblock_core import constants
-from simplyblock_core.controllers import pool_controller, lvol_controller, snapshot_controller, device_controller
+from simplyblock_core.controllers import pool_controller, lvol_controller, snapshot_controller, device_controller, \
+tasks_controller
 from simplyblock_core.controllers import caching_node_controller, health_controller
 from simplyblock_core.models.pool import Pool

@@ -38,16 +39,18 @@ class CLIWrapper:
 sub_command.add_argument("cluster_id", help='UUID of the cluster to which the node will belong')
 sub_command.add_argument("node_ip", help='IP of storage node to add')
 sub_command.add_argument("ifname", help='Management interface name')
-sub_command.add_argument("--
+sub_command.add_argument("--partitions", help='Number of partitions to create per device', type=int, default=0)
+sub_command.add_argument("--jm-percent", help='Number in percent to use for JM from each device',
+type=int, default=3, dest='jm_percent')
 sub_command.add_argument("--data-nics", help='Data interface names', nargs='+', dest='data_nics')
-sub_command.add_argument("--
-
-sub_command.add_argument("--
+sub_command.add_argument("--max-lvol", help='Max lvol per storage node', dest='max_lvol', type=int)
+sub_command.add_argument("--max-snap", help='Max snapshot per storage node', dest='max_snap', type=int)
+sub_command.add_argument("--max-prov", help='Max provisioning size of all storage nodes', dest='max_prov')
+sub_command.add_argument("--number-of-devices", help='Number of devices per storage node if it\'s not supported EC2 instance', dest='number_of_devices', type=int)
+
 sub_command.add_argument("--spdk-image", help='SPDK image uri', dest='spdk_image')
 sub_command.add_argument("--spdk-debug", help='Enable spdk debug logs', dest='spdk_debug', required=False, action='store_true')

-sub_command.add_argument("--iobuf_small_pool_count", help='bdev_set_options param', dest='small_pool_count', type=int, default=0)
-sub_command.add_argument("--iobuf_large_pool_count", help='bdev_set_options param', dest='large_pool_count', type=int, default=0)
 sub_command.add_argument("--iobuf_small_bufsize", help='bdev_set_options param', dest='small_bufsize', type=int, default=0)
 sub_command.add_argument("--iobuf_large_bufsize", help='bdev_set_options param', dest='large_bufsize', type=int, default=0)

@@ -64,7 +67,7 @@ class CLIWrapper:
 dest='force_migrate', required=False, action='store_true')
 # List all storage nodes
 sub_command = self.add_sub_command(subparser, "list", 'List storage nodes')
-sub_command.add_argument("--cluster-id", help='id of the cluster for which nodes are listed')
+sub_command.add_argument("--cluster-id", help='id of the cluster for which nodes are listed', dest='cluster_id')
 sub_command.add_argument("--json", help='Print outputs in json format', action='store_true')

 sub_command = self.add_sub_command(subparser, "get", 'Get storage node info')
@@ -72,17 +75,18 @@ class CLIWrapper:

 # Restart storage node
 sub_command = self.add_sub_command(
-subparser, "restart", 'Restart a storage node
+subparser, "restart", 'Restart a storage node', usage='All functions and device drivers will be reset. '
 'During restart, the node does not accept IO. In a high-availability setup, '
-'this will not impact operations
+'this will not impact operations')
 sub_command.add_argument("node_id", help='UUID of storage node')
-sub_command.add_argument("--
-sub_command.add_argument("--
+sub_command.add_argument("--max-lvol", help='Max lvol per storage node', dest='max_lvol', type=int, default=0)
+sub_command.add_argument("--max-snap", help='Max snapshot per storage node', dest='max_snap', type=int, default=0)
+sub_command.add_argument("--max-prov", help='Max provisioning size of all storage nodes', dest='max_prov', default="")
+sub_command.add_argument("--number-of-devices", help='Number of devices per storage node if it\'s not supported EC2 instance', dest='number_of_devices', type=int)
+
 sub_command.add_argument("--spdk-image", help='SPDK image uri', dest='spdk_image')
 sub_command.add_argument("--spdk-debug", help='Enable spdk debug logs', dest='spdk_debug', required=False, action='store_true')

-sub_command.add_argument("--iobuf_small_pool_count", help='bdev_set_options param', dest='small_pool_count', type=int, default=0)
-sub_command.add_argument("--iobuf_large_pool_count", help='bdev_set_options param', dest='large_pool_count', type=int, default=0)
 sub_command.add_argument("--iobuf_small_bufsize", help='bdev_set_options param', dest='small_bufsize', type=int, default=0)
 sub_command.add_argument("--iobuf_large_bufsize", help='bdev_set_options param', dest='large_bufsize', type=int, default=0)

@@ -90,7 +94,7 @@ class CLIWrapper:

 # Shutdown storage node
 sub_command = self.add_sub_command(
-subparser, "shutdown", 'Shutdown a storage node
+subparser, "shutdown", 'Shutdown a storage node', usage='Once the command is issued, the node will stop accepting '
 'IO,but IO, which was previously received, will still be processed. '
 'In a high-availability setup, this will not impact operations.')
 sub_command.add_argument("node_id", help='UUID of storage node')
@@ -98,7 +102,7 @@ class CLIWrapper:

 # Suspend storage node
 sub_command = self.add_sub_command(
-subparser, "suspend", 'Suspend a storage node
+subparser, "suspend", 'Suspend a storage node', usage='The node will stop accepting new IO, but will finish '
 'processing any IO, which has been received already.')
 sub_command.add_argument("node_id", help='UUID of storage node')
 sub_command.add_argument("--force", help='Force node suspend', required=False, action='store_true')
@@ -107,13 +111,13 @@ class CLIWrapper:
 sub_command = self.add_sub_command(subparser, "resume", 'Resume a storage node')
 sub_command.add_argument("node_id", help='UUID of storage node')

-sub_command = self.add_sub_command(subparser, "get-io-stats", '
+sub_command = self.add_sub_command(subparser, "get-io-stats", 'Get node IO statistics')
 sub_command.add_argument("node_id", help='Node ID')
 sub_command.add_argument("--history", help='list history records -one for every 15 minutes- '
 'for XX days and YY hours -up to 10 days in total-, format: XXdYYh')

 sub_command = self.add_sub_command(
-subparser, 'get-capacity', '
+subparser, 'get-capacity', 'Get node capacity statistics')
 sub_command.add_argument("node_id", help='Node ID')
 sub_command.add_argument("--history", help='list history records -one for every 15 minutes- '
 'for XX days and YY hours -up to 10 days in total-, format: XXdYYh')
@@ -126,7 +130,7 @@ class CLIWrapper:
 sub_command.add_argument(
 "--json", help='Print outputs in json format', required=False, action='store_true')

-sub_command = self.add_sub_command(subparser, "device-testing-mode", '
+sub_command = self.add_sub_command(subparser, "device-testing-mode", 'Set device testing mode')
 sub_command.add_argument("device_id", help='Device UUID')
 sub_command.add_argument("mode", help='Testing mode', choices=[
 'full_pass_through', 'io_error_on_read', 'io_error_on_write',
@@ -144,7 +148,7 @@ class CLIWrapper:
 sub_command.add_argument("device_id", help='the devices\'s UUID')

 # Reset storage device
-sub_command = self.add_sub_command(subparser, "restart-device", '
+sub_command = self.add_sub_command(subparser, "restart-device", 'Restart storage device',
 usage="a previously removed or unavailable device may be returned into "
 "online state. If the device is not physically present, accessible "
 "or healthy, it will flip back into unavailable state again.")
@@ -157,30 +161,29 @@ class CLIWrapper:
 "auto-rebalancing background process in which some cluster "
 "capacity is re-distributed to this newly added device.")
 sub_command = self.add_sub_command(
-subparser, 'remove-device', 'Remove a storage device
+subparser, 'remove-device', 'Remove a storage device', usage='The device will become unavailable, independently '
 'if it was physically removed from the server. This function can be used if '
 'auto-detection of removal did not work or if the device must be maintained '
 'otherwise while remaining inserted into the server. ')
 sub_command.add_argument("device_id", help='Storage device ID')
 sub_command.add_argument("--force", help='Force device remove', required=False, action='store_true')

-sub_command = self.add_sub_command(
-
-
-
-
-
-
+# sub_command = self.add_sub_command(
+# subparser, 'set-failed-device', 'Set storage device to failed state. ', usage='This command can be used, '
+# 'if an administrator believes that the device must be changed, '
+# 'but its status and health state do not lead to an automatic detection '
+# 'of the failure state. Attention!!! The failed state is final, all data '
+# 'on the device will be automatically recovered to other devices '
+# 'in the cluster. ')

 sub_command = self.add_sub_command(
-subparser, 'get-capacity-device', '
-'the device in bytes')
+subparser, 'get-capacity-device', 'Get device capacity')
 sub_command.add_argument("device_id", help='Storage device ID')
 sub_command.add_argument("--history", help='list history records -one for every 15 minutes- '
 'for XX days and YY hours -up to 10 days in total-, format: XXdYYh')

 sub_command = self.add_sub_command(
-subparser, 'get-io-stats-device', '
+subparser, 'get-io-stats-device', 'Get device IO statistics')
 sub_command.add_argument("device_id", help='Storage device ID')
 sub_command.add_argument("--history", help='list history records -one for every 15 minutes- '
 'for XX days and YY hours -up to 10 days in total-, format: XXdYYh')
@@ -237,6 +240,23 @@ class CLIWrapper:
 dest='log_del_interval', default='7d')
 sub_command.add_argument("--metrics-retention-period", help='retention period for prometheus metrics, default: 7d',
 dest='metrics_retention_period', default='7d')
+sub_command.add_argument("--contact-point", help='the email or slack webhook url to be used for alerting',
+dest='contact_point', default='')
+sub_command.add_argument("--grafana-endpoint", help='the endpoint url for grafana',
+dest='grafana_endpoint', default='')
+
+# add cluster
+sub_command = self.add_sub_command(subparser, 'add', 'Add new cluster')
+sub_command.add_argument("--blk_size", help='The block size in bytes', type=int, choices=[512, 4096], default=512)
+sub_command.add_argument("--page_size", help='The size of a data page in bytes', type=int, default=2097152)
+sub_command.add_argument("--cap-warn", help='Capacity warning level in percent, default=80',
+type=int, required=False, dest="cap_warn")
+sub_command.add_argument("--cap-crit", help='Capacity critical level in percent, default=90',
+type=int, required=False, dest="cap_crit")
+sub_command.add_argument("--prov-cap-warn", help='Capacity warning level in percent, default=180',
+type=int, required=False, dest="prov_cap_warn")
+sub_command.add_argument("--prov-cap-crit", help='Capacity critical level in percent, default=190',
+type=int, required=False, dest="prov_cap_crit")

 # show cluster list
 self.add_sub_command(subparser, 'list', 'Show clusters list')
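Note: judging from the argument definitions added above, the new `cluster add` sub-command would presumably be invoked roughly as follows; the `sbcli-pre` console-script name and the flag values are illustrative assumptions, not taken from this diff.

    sbcli-pre cluster add --blk_size 4096 --page_size 2097152 --cap-warn 80 --cap-crit 90 --prov-cap-warn 180 --prov-cap-crit 190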
@@ -259,36 +279,33 @@ class CLIWrapper:
 sub_command.add_argument("cluster_id", help='the cluster UUID')

 sub_command = self.add_sub_command(
-subparser, 'get-capacity', '
-'(in percent and absolute) and provisioned capacity (in percent and absolute) '
-'in GB in the cluster.')
+subparser, 'get-capacity', 'Get cluster capacity')
 sub_command.add_argument("cluster_id", help='the cluster UUID')
 sub_command.add_argument("--json", help='Print json output', required=False, action='store_true')
 sub_command.add_argument("--history", help='(XXdYYh), list history records (one for every 15 minutes) '
 'for XX days and YY hours (up to 10 days in total).')

 sub_command = self.add_sub_command(
-subparser, 'get-io-stats', '
+subparser, 'get-io-stats', 'Get cluster IO statistics')
 sub_command.add_argument("cluster_id", help='the cluster UUID')
 sub_command.add_argument("--records", help='Number of records, default: 20', type=int, default=20)
 sub_command.add_argument("--history", help='(XXdYYh), list history records (one for every 15 minutes) '
 'for XX days and YY hours (up to 10 days in total).')

-sub_command = self.add_sub_command(
-
-sub_command.add_argument("cluster_id", help='the cluster UUID')
+# sub_command = self.add_sub_command(
+# subparser, 'get-cli-ssh-pass', 'returns the ssh password for the CLI ssh connection')
+# sub_command.add_argument("cluster_id", help='the cluster UUID')

 # get-logs
 sub_command = self.add_sub_command(subparser, 'get-logs', 'Returns cluster status logs')
 sub_command.add_argument("cluster_id", help='cluster uuid')

 # get-secret
-sub_command = self.add_sub_command(subparser, 'get-secret', '
+sub_command = self.add_sub_command(subparser, 'get-secret', 'Get cluster secret')
 sub_command.add_argument("cluster_id", help='cluster uuid')

 # set-secret
-sub_command = self.add_sub_command(subparser, 'upd-secret', 'Updates the secret
-'one with a new one) and returns the new one.')
+sub_command = self.add_sub_command(subparser, 'upd-secret', 'Updates the cluster secret')
 sub_command.add_argument("cluster_id", help='cluster uuid')
 sub_command.add_argument("secret", help='new 20 characters password')

@@ -312,6 +329,16 @@ class CLIWrapper:
 sub_command = self.add_sub_command(subparser, "list-tasks", 'List tasks by cluster ID')
 sub_command.add_argument("cluster_id", help='UUID of the cluster')

+# cancel task
+sub_command = self.add_sub_command(subparser, "cancel-task", 'Cancel task by ID')
+sub_command.add_argument("id", help='UUID of the Task')
+
+# delete cluster
+sub_command = self.add_sub_command(
+subparser, 'delete', 'Delete Cluster',
+usage="This is only possible, if no storage nodes and pools are attached to the cluster")
+sub_command.add_argument("id", help='cluster UUID')
+

 #
 # ----------------- lvol -----------------
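Note: the hunk above registers `cancel-task` and `delete` under the cluster sub-parser, each taking a single positional `id`. Assuming the same `sbcli-pre cluster ...` invocation style as the other cluster sub-commands (an assumption, not shown in this diff), usage would presumably look like:

    sbcli-pre cluster cancel-task <task_uuid>
    sbcli-pre cluster delete <cluster_uuid>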
@@ -362,8 +389,9 @@ class CLIWrapper:
 sub_command.add_argument("--max-w-mbytes", help='Maximum Write Mega Bytes Per Second', type=int)

 # list lvols
-sub_command = self.add_sub_command(subparser, 'list', 'List
-sub_command.add_argument("--cluster-id", help='List LVols in particular cluster')
+sub_command = self.add_sub_command(subparser, 'list', 'List LVols')
+sub_command.add_argument("--cluster-id", help='List LVols in particular cluster', dest="cluster_id")
+sub_command.add_argument("--pool", help='List LVols in particular Pool ID or name', dest="pool")
 sub_command.add_argument("--json", help='Print outputs in json format', required=False, action='store_true')

 # Get the size and max_size of the lvol
@@ -378,7 +406,7 @@ class CLIWrapper:

 # delete lvol
 sub_command = self.add_sub_command(
-subparser, 'delete', 'Delete LVol
+subparser, 'delete', 'Delete LVol', usage='This is only possible, if no more snapshots and non-inflated clones '
 'of the volume exist. The volume must be suspended before it can be deleted. ')
 sub_command.add_argument("id", help='LVol id or ids', nargs='+')
 sub_command.add_argument("--force", help='Force delete LVol from the cluster', required=False,
@@ -386,13 +414,13 @@ class CLIWrapper:

 # show connection string
 sub_command = self.add_sub_command(
-subparser, 'connect', '
+subparser, 'connect', 'Get lvol connection strings', usage='Multiple connections to the cluster are '
 'always available for multi-pathing and high-availability.')
 sub_command.add_argument("id", help='LVol id')

 # lvol resize
 sub_command = self.add_sub_command(
-subparser, 'resize', 'Resize LVol
+subparser, 'resize', 'Resize LVol', usage='The lvol cannot be exceed the maximum size for lvols. It cannot '
 'exceed total remaining provisioned space in pool. It cannot drop below the '
 'current utilization.')
 sub_command.add_argument("id", help='LVol id')
@@ -419,23 +447,22 @@ class CLIWrapper:

 # lvol get-capacity
 sub_command = self.add_sub_command(
-subparser, 'get-capacity',
-'(in percent and absolute) capacity.')
+subparser, 'get-capacity',"Get LVol capacity")
 sub_command.add_argument("id", help='LVol id')
 sub_command.add_argument("--history", help='(XXdYYh), list history records (one for every 15 minutes) '
 'for XX days and YY hours (up to 10 days in total).')

 # lvol get-io-stats
 sub_command = self.add_sub_command(
-subparser, 'get-io-stats', help="
+subparser, 'get-io-stats', help="Get LVol IO statistics")
 sub_command.add_argument("id", help='LVol id')
 sub_command.add_argument("--history", help='(XXdYYh), list history records (one for every 15 minutes) '
 'for XX days and YY hours (up to 10 days in total).')

-sub_command = self.add_sub_command(subparser, 'send-cluster-map', 'send
+sub_command = self.add_sub_command(subparser, 'send-cluster-map', 'send cluster map')
 sub_command.add_argument("id", help='LVol id')

-sub_command = self.add_sub_command(subparser, 'get-cluster-map', 'get
+sub_command = self.add_sub_command(subparser, 'get-cluster-map', 'get cluster map')
 sub_command.add_argument("id", help='LVol id')

 # check lvol
|
|
446
473
|
# mgmt-node ops
|
447
474
|
subparser = self.add_command('mgmt', 'Management node commands')
|
448
475
|
|
449
|
-
sub_command = self.add_sub_command(subparser, 'add', 'Add Management node to the cluster')
|
476
|
+
sub_command = self.add_sub_command(subparser, 'add', 'Add Management node to the cluster (local run)')
|
450
477
|
sub_command.add_argument("cluster_ip", help='the cluster IP address')
|
451
478
|
sub_command.add_argument("cluster_id", help='the cluster UUID')
|
452
479
|
sub_command.add_argument("ifname", help='Management interface name')
|
@@ -462,6 +489,7 @@ class CLIWrapper:
 # add pool
 sub_command = self.add_sub_command(subparser, 'add', 'Add a new Pool')
 sub_command.add_argument("name", help='Pool name')
+sub_command.add_argument("cluster_id", help='Cluster UUID')
 sub_command.add_argument("--pool-max", help='Pool maximum size: 20M, 20G, 0(default)', default="0")
 sub_command.add_argument("--lvol-max", help='LVol maximum size: 20M, 20G, 0(default)', default="0")
 sub_command.add_argument("--max-rw-iops", help='Maximum Read Write IO Per Second', type=int)
@@ -485,7 +513,8 @@ class CLIWrapper:
 # list pools
 sub_command = self.add_sub_command(subparser, 'list', 'List pools')
 sub_command.add_argument("--json", help='Print outputs in json format', required=False, action='store_true')
-sub_command.add_argument("--cluster-id", help='ID of the cluster',
+sub_command.add_argument("--cluster-id", help='ID of the cluster', dest="cluster_id")
+
 # get pool
 sub_command = self.add_sub_command(subparser, 'get', 'get pool details')
 sub_command.add_argument("id", help='pool uuid')
@@ -506,24 +535,21 @@ class CLIWrapper:
 sub_command.add_argument("pool_id", help='pool uuid')

 # get-secret
-sub_command = self.add_sub_command(subparser, 'get-secret', '
+sub_command = self.add_sub_command(subparser, 'get-secret', 'Get pool secret')
 sub_command.add_argument("pool_id", help='pool uuid')

 # get-secret
-sub_command = self.add_sub_command(subparser, 'upd-secret', 'Updates
-'one with a new one) and returns the new one.')
+sub_command = self.add_sub_command(subparser, 'upd-secret', 'Updates pool secret')
 sub_command.add_argument("pool_id", help='pool uuid')
 sub_command.add_argument("secret", help='new 20 characters password')

 # get-capacity
-sub_command = self.add_sub_command(subparser, 'get-capacity', '
-'and utilized (percent) storage on the Pool.')
+sub_command = self.add_sub_command(subparser, 'get-capacity', 'Get pool capacity')
 sub_command.add_argument("pool_id", help='pool uuid')

 # get-io-stats
 sub_command = self.add_sub_command(
-subparser, 'get-io-stats', '
-'(read-IO, write-IO, total-IO, read mbs, write mbs, total mbs).')
+subparser, 'get-io-stats', 'Get pool IO statistics')
 sub_command.add_argument("id", help='Pool id')
 sub_command.add_argument("--history", help='(XXdYYh), list history records (one for every 15 minutes) '
 'for XX days and YY hours (up to 10 days in total).')
@@ -577,7 +603,7 @@ class CLIWrapper:
 sub_command.add_argument("node_id", help='Caching node UUID')
 sub_command.add_argument("lvol_id", help='LVol UUID')

-sub_command = self.add_sub_command(subparser, 'recreate', 'recreate Caching node bdevs
+sub_command = self.add_sub_command(subparser, 'recreate', 'recreate Caching node bdevs')
 sub_command.add_argument("node_id", help='Caching node UUID')

 def init_parser(self):
@@ -613,10 +639,13 @@ class CLIWrapper:
 elif sub_command == "deploy-cleaner":
 ret = storage_ops.deploy_cleaner()

-elif sub_command == "add":
-ret = self.storage_node_add(args)
-
 elif sub_command == "add-node":
+if not args.max_lvol:
+self.parser.error(f"Mandatory argument '--max-lvol' not provided for {sub_command}")
+if not args.max_snap:
+self.parser.error(f"Mandatory argument '--max-snap' not provided for {sub_command}")
+if not args.max_prov:
+self.parser.error(f"Mandatory argument '--max-prov' not provided for {sub_command}")
 cluster_id = args.cluster_id
 node_ip = args.node_ip
 ifname = args.ifname
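Note: with the validation added above, `add-node` now rejects invocations that omit --max-lvol, --max-snap or --max-prov. A hypothetical call under that assumption (the `sbcli-pre` script name, the `storage-node` command-group name, the placeholder values and the size suffix are illustrative only):

    sbcli-pre storage-node add-node <cluster_uuid> <node_ip> <ifname> --max-lvol 10 --max-snap 10 --max-prov 2T --partitions 1 --jm-percent 3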
@@ -624,34 +653,28 @@ class CLIWrapper:
 spdk_image = args.spdk_image
 spdk_debug = args.spdk_debug

-small_pool_count = args.small_pool_count
-large_pool_count = args.large_pool_count
 small_bufsize = args.small_bufsize
 large_bufsize = args.large_bufsize
+num_partitions_per_dev = args.partitions
+jm_percent = args.jm_percent

-
-
-
-
-
-
-
-spdk_mem = None
-if args.spdk_mem:
-spdk_mem = self.parse_size(args.spdk_mem)
-if spdk_mem < 1 * 1024 * 1024:
-return f"SPDK memory:{args.spdk_mem} must be larger than 1G"
+max_lvol = args.max_lvol
+max_snap = args.max_snap
+max_prov = self.parse_size(args.max_prov)
+number_of_devices = args.number_of_devices
+if max_prov < 1 * 1024 * 1024:
+return f"Max provisioning memory:{args.max_prov} must be larger than 1G"

 out = storage_ops.add_node(
-cluster_id, node_ip, ifname, data_nics,
-
+cluster_id, node_ip, ifname, data_nics, max_lvol, max_snap, max_prov, spdk_image, spdk_debug,
+small_bufsize, large_bufsize, num_partitions_per_dev, jm_percent, number_of_devices)
 return out

 elif sub_command == "list":
-ret = storage_ops.list_storage_nodes(
+ret = storage_ops.list_storage_nodes(args.json, args.cluster_id)

 elif sub_command == "remove":
-ret = storage_ops.remove_storage_node(args.node_id, args.force_remove)
+ret = storage_ops.remove_storage_node(args.node_id, args.force_remove, args.force_migrate)

 elif sub_command == "delete":
 ret = storage_ops.delete_storage_node(args.node_id)
@@ -662,29 +685,17 @@ class CLIWrapper:
 spdk_image = args.spdk_image
 spdk_debug = args.spdk_debug

-cpu_mask = None
-if args.spdk_cpu_mask:
-if self.validate_cpu_mask(args.spdk_cpu_mask):
-cpu_mask = args.spdk_cpu_mask
-else:
-return f"Invalid cpu mask value: {args.spdk_cpu_mask}"
-
-spdk_mem = None
-if args.spdk_mem:
-spdk_mem = self.parse_size(args.spdk_mem)
-if spdk_mem < 1 * 1024 * 1024:
-return f"SPDK memory:{args.spdk_mem} must be larger than 1G"

+max_lvol = args.max_lvol
+max_snap = args.max_snap
+max_prov = self.parse_size(args.max_prov)

-small_pool_count = args.small_pool_count
-large_pool_count = args.large_pool_count
 small_bufsize = args.small_bufsize
 large_bufsize = args.large_bufsize

 ret = storage_ops.restart_storage_node(
-node_id,
+node_id, max_lvol, max_snap, max_prov,
 spdk_image, spdk_debug,
-small_pool_count, large_pool_count,
 small_bufsize, large_bufsize)

 elif sub_command == "list-devices":
@@ -793,8 +804,8 @@ class CLIWrapper:
 sub_command = args_dict[args.command]
 if sub_command == 'create':
 ret = self.cluster_create(args)
-elif sub_command == '
-ret = self.
+elif sub_command == 'add':
+ret = self.cluster_add(args)
 elif sub_command == 'status':
 cluster_id = args.cluster_id
 ret = cluster_ops.show_cluster(cluster_id)
@@ -844,7 +855,10 @@ class CLIWrapper:
 ret = cluster_ops.update_cluster(args.id)

 elif sub_command == "list-tasks":
-ret =
+ret = tasks_controller.list_tasks(args.cluster_id)
+
+elif sub_command == "cancel-task":
+ret = tasks_controller.cancel_task(args.id)

 elif sub_command == "graceful-shutdown":
 ret = cluster_ops.cluster_grace_shutdown(args.id)
@@ -852,6 +866,9 @@ class CLIWrapper:
 elif sub_command == "graceful-startup":
 ret = cluster_ops.cluster_grace_startup(args.id)

+elif sub_command == "delete":
+ret = cluster_ops.delete_cluster(args.id)
+
 else:
 self.parser.print_help()

@@ -896,7 +913,7 @@ class CLIWrapper:
 args.id, args.max_rw_iops, args.max_rw_mbytes,
 args.max_r_mbytes, args.max_w_mbytes)
 elif sub_command == "list":
-ret = lvol_controller.list_lvols(args.json)
+ret = lvol_controller.list_lvols(args.json, args.cluster_id, args.pool)
 elif sub_command == "list-mem":
 ret = lvol_controller.list_lvols_mem(args.json, args.csv)
 elif sub_command == "get":
@@ -955,7 +972,7 @@ class CLIWrapper:
 cluster_id = args.cluster_id
 cluster_ip = args.cluster_ip
 ifname = args.ifname
-ret =
+ret = mgmt_ops.deploy_mgmt_node(cluster_ip, cluster_id, ifname)
 elif sub_command == "list":
 ret = mgmt_ops.list_mgmt_nodes(args.json)
 elif sub_command == "remove":
@@ -974,7 +991,9 @@ class CLIWrapper:
 args.max_rw_mbytes,
 args.max_r_mbytes,
 args.max_w_mbytes,
-args.has_secret
+args.has_secret,
+args.cluster_id
+)

 elif sub_command == "set":
 pool_max = None
@@ -996,7 +1015,7 @@ class CLIWrapper:
 ret = pool_controller.get_pool(args.id, args.json)

 elif sub_command == "list":
-ret = pool_controller.list_pools(args.json)
+ret = pool_controller.list_pools(args.json, args.cluster_id)

 elif sub_command == "delete":
 ret = pool_controller.delete_pool(args.id)
@@ -1092,14 +1111,6 @@ class CLIWrapper:
 out = storage_ops.list_storage_nodes(self.db_store, args.json)
 return out

-def storage_node_add(self, args):
-cluster_id = args.cluster_id
-ifname = args.ifname
-data_nics = args.data_nics
-# TODO: Validate the inputs
-out = storage_ops.add_storage_node(cluster_id, ifname, data_nics)
-return out
-
 def storage_node_list_devices(self, args):
 node_id = args.node_id
 sort = args.sort
@@ -1109,6 +1120,17 @@ class CLIWrapper:
 out = storage_ops.list_storage_devices(self.db_store, node_id, sort, is_json)
 return out

+def cluster_add(self, args):
+page_size_in_blocks = args.page_size
+blk_size = args.blk_size
+cap_warn = args.cap_warn
+cap_crit = args.cap_crit
+prov_cap_warn = args.prov_cap_warn
+prov_cap_crit = args.prov_cap_crit
+
+return cluster_ops.add_cluster(
+blk_size, page_size_in_blocks, cap_warn, cap_crit, prov_cap_warn, prov_cap_crit)
+
 def cluster_create(self, args):
 page_size_in_blocks = args.page_size
 blk_size = args.blk_size
@@ -1120,33 +1142,13 @@ class CLIWrapper:
 ifname = args.ifname
 log_del_interval = args.log_del_interval
 metrics_retention_period = args.metrics_retention_period
+contact_point = args.contact_point
+grafana_endpoint = args.grafana_endpoint

-# TODO: Validate the inputs
 return cluster_ops.create_cluster(
 blk_size, page_size_in_blocks,
 CLI_PASS, cap_warn, cap_crit, prov_cap_warn, prov_cap_crit,
-ifname, log_del_interval, metrics_retention_period)
-
-def cluster_join(self, args):
-cluster_id = args.cluster_id
-cluster_ip = args.cluster_ip
-role = args.role
-ifname = args.ifname
-data_nics = args.data_nics
-spdk_cpu_mask = None
-if args.spdk_cpu_mask:
-if self.validate_cpu_mask(args.spdk_cpu_mask):
-spdk_cpu_mask = args.spdk_cpu_mask
-else:
-return f"Invalid cpu mask value: {args.spdk_cpu_mask}"
-
-spdk_mem = None
-if args.spdk_mem:
-spdk_mem = self.parse_size(args.spdk_mem)
-if spdk_mem < 1 * 1024 * 1024:
-return f"SPDK memory:{args.spdk_mem} must be larger than 1G"
-
-return cluster_ops.join_cluster(cluster_ip, cluster_id, role, ifname, data_nics, spdk_cpu_mask, spdk_mem)
+ifname, log_del_interval, metrics_retention_period, contact_point, grafana_endpoint)

 def query_yes_no(self, question, default="yes"):
 """Ask a yes/no question via raw_input() and return their answer.
|