sbcli-pre 1.2.3 (zip) → 1.2.5 (zip)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/PKG-INFO +20 -5
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/README.md +19 -4
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/env_var +1 -1
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/sbcli_pre.egg-info/PKG-INFO +20 -5
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/sbcli_pre.egg-info/SOURCES.txt +5 -5
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_cli/cli.py +115 -113
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/cluster_ops.py +238 -141
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/constants.py +7 -5
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/controllers/caching_node_controller.py +6 -8
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/controllers/cluster_events.py +0 -9
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/controllers/device_controller.py +63 -56
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/controllers/events_controller.py +3 -5
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/controllers/health_controller.py +40 -30
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/controllers/lvol_controller.py +36 -42
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/controllers/pool_controller.py +4 -8
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/controllers/snapshot_controller.py +3 -9
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/distr_controller.py +9 -13
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/kv_store.py +29 -47
- sbcli_pre-1.2.5/simplyblock_core/mgmt_node_ops.py +80 -0
- sbcli_pre-1.2.5/simplyblock_core/models/deployer.py +62 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/models/events.py +1 -9
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/models/job_schedule.py +0 -6
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/models/nvme_device.py +4 -42
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/models/storage_node.py +1 -9
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/rpc_client.py +10 -55
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/__init__.py +4 -0
- sbcli_pre-1.2.3/simplyblock_core/scripts/alerting/alert_resources.yaml.j2 → sbcli_pre-1.2.5/simplyblock_core/scripts/alerting/alert_resources.yaml +5 -54
- sbcli_pre-1.2.5/simplyblock_core/scripts/apply_dashboard.sh +22 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/dashboards/cluster.json +1 -1
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/deploy_stack.sh +0 -2
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/docker-compose-swarm-monitoring.yml +13 -22
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/docker-compose-swarm.yml +2 -17
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/haproxy.cfg +0 -15
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/install_deps.sh +0 -1
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/services/capacity_and_stats_collector.py +1 -1
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/services/device_monitor.py +44 -3
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/services/distr_event_collector.py +11 -10
- sbcli_pre-1.2.5/simplyblock_core/services/health_check_service.py +136 -0
- sbcli_pre-1.2.3/simplyblock_core/services/tasks_runner_restart.py → sbcli_pre-1.2.5/simplyblock_core/services/job_tasks.py +46 -93
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/services/lvol_monitor.py +1 -1
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/services/lvol_stat_collector.py +1 -1
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/services/port_stat_collector.py +1 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/services/storage_node_monitor.py +44 -49
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/snode_client.py +0 -12
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/storage_node_ops.py +336 -525
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/utils.py +1 -46
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/app.py +2 -1
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/blueprints/snode_ops.py +25 -103
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/blueprints/web_api_cluster.py +43 -20
- sbcli_pre-1.2.5/simplyblock_web/blueprints/web_api_deployer.py +394 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/blueprints/web_api_device.py +7 -10
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/blueprints/web_api_lvol.py +5 -9
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/blueprints/web_api_pool.py +5 -14
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/blueprints/web_api_storage_node.py +10 -3
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/node_utils.py +2 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/utils.py +0 -8
- sbcli_pre-1.2.3/simplyblock_core/controllers/tasks_controller.py +0 -103
- sbcli_pre-1.2.3/simplyblock_core/controllers/tasks_events.py +0 -37
- sbcli_pre-1.2.3/simplyblock_core/mgmt_node_ops.py +0 -205
- sbcli_pre-1.2.3/simplyblock_core/services/health_check_service.py +0 -134
- sbcli_pre-1.2.3/simplyblock_core/services/tasks_runner_migration.py +0 -61
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/pyproject.toml +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/sbcli_pre.egg-info/dependency_links.txt +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/sbcli_pre.egg-info/entry_points.txt +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/sbcli_pre.egg-info/requires.txt +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/sbcli_pre.egg-info/top_level.txt +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/setup.cfg +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/setup.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_cli/main.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/__init__.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/cnode_client.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/compute_node_ops.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/controllers/__init__.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/controllers/device_events.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/controllers/lvol_events.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/controllers/mgmt_events.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/controllers/pool_events.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/controllers/snapshot_events.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/controllers/storage_events.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/models/__init__.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/models/base_model.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/models/caching_node.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/models/cluster.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/models/compute_node.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/models/global_settings.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/models/iface.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/models/lvol_model.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/models/mgmt_node.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/models/pool.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/models/port_stat.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/models/snapshot.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/models/stats.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/pci_utils.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/alerting/alert_rules.yaml +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/clean_local_storage_deploy.sh +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/config_docker.sh +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/dashboards/devices.json +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/dashboards/lvols.json +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/dashboards/node-exporter.json +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/dashboards/nodes.json +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/dashboards/pools.json +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/datasource.yml +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/db_config_double.sh +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/db_config_single.sh +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/prometheus.yml +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/run_ssh.sh +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/set_db_config.sh +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/scripts/stack_deploy_wait.sh +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/services/__init__.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/services/caching_node_monitor.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/services/cap_monitor.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/services/install_service.sh +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/services/log_agg_service.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/services/mgmt_node_monitor.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/services/remove_service.sh +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/services/service_template.service +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_core/shell_utils.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/__init__.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/auth_middleware.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/blueprints/__init__.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/blueprints/caching_node_ops.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/blueprints/caching_node_ops_k8s.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/blueprints/node_api_basic.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/blueprints/node_api_caching_docker.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/blueprints/node_api_caching_ks.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/blueprints/web_api_caching_node.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/blueprints/web_api_mgmt_node.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/blueprints/web_api_snapshot.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/caching_node_app.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/caching_node_app_k8s.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/node_webapp.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/snode_app.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/static/delete.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/static/deploy.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/static/deploy_cnode.yaml +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/static/deploy_spdk.yaml +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/static/is_up.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/static/list_deps.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/static/rpac.yaml +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/static/tst.py +0 -0
- {sbcli_pre-1.2.3 → sbcli_pre-1.2.5}/simplyblock_web/templates/deploy_spdk.yaml.j2 +0 -0
simplyblock_core/controllers/device_controller.py

@@ -1,8 +1,8 @@
 import time
 import logging
 
-from simplyblock_core import distr_controller, utils
-from simplyblock_core.controllers import device_events, lvol_controller
+from simplyblock_core import distr_controller, utils
+from simplyblock_core.controllers import device_events, lvol_controller
 from simplyblock_core.kv_store import DBController
 from simplyblock_core.models.nvme_device import NVMeDevice
 from simplyblock_core.rpc_client import RPCClient
@@ -16,7 +16,6 @@ def device_set_state(device_id, state):
     dev = db_controller.get_storage_devices(device_id)
     if not dev:
         logger.error("device not found")
-        return False
 
     snode = db_controller.get_storage_node_by_id(dev.node_id)
     if not snode:
@@ -36,7 +35,7 @@ def device_set_state(device_id, state):
 
     old_status = dev.status
     device.status = state
-    distr_controller.send_dev_status_event(device, device.status)
+    distr_controller.send_dev_status_event(device.cluster_device_order, device.status)
     snode.write_to_db(db_controller.kv_store)
     device_events.device_status_change(device, device.status, old_status)
     return True
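Note the signature change in `send_dev_status_event`: 1.2.5 passes the device's integer `cluster_device_order` (its slot in the cluster map) rather than the device object itself. A hedged before/after sketch, assuming an `NVMeDevice`-like `device` as in this hunk:

```python
# Caller-side migration sketch; `device` stands in for any object
# with `status` and `cluster_device_order` attributes, as in the diff.

# sbcli-pre 1.2.3 style: pass the whole device object.
distr_controller.send_dev_status_event(device, device.status)

# sbcli-pre 1.2.5 style: pass only the device's slot in the cluster map.
distr_controller.send_dev_status_event(device.cluster_device_order, device.status)
```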
@@ -79,20 +78,14 @@ def device_set_read_only(device_id):
 
 
 def device_set_online(device_id):
-    ret = device_set_state(device_id, NVMeDevice.STATUS_ONLINE)
-    if ret:
-        logger.info("Adding task to device data migration")
-        task_id = tasks_controller.add_device_mig_task(device_id)
-        if task_id:
-            logger.info(f"Task id: {task_id}")
-    return ret
+    return device_set_state(device_id, NVMeDevice.STATUS_ONLINE)
 
 
 def get_alceml_name(alceml_id):
     return f"alceml_{alceml_id}"
 
 
-def _def_create_device_stack(device_obj, snode, force=False):
+def _def_create_device_stack(device_obj, snode):
 
     rpc_client = RPCClient(
         snode.mgmt_ip, snode.rpc_port,
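`device_set_online` is reduced to a plain state transition; the data-migration task it used to queue is gone along with `controllers/tasks_controller.py`, which the file list above shows was removed in 1.2.5. A hedged usage sketch:

```python
from simplyblock_core.controllers import device_controller

# 1.2.5: bringing a device online is just a state change; no migration
# task is scheduled any more (tasks_controller left the package).
ok = device_controller.device_set_online("9c2154f6")  # hypothetical device UUID
```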
@@ -105,26 +98,22 @@ def _def_create_device_stack(device_obj, snode, force=False):
     ret = rpc_client.bdev_passtest_create(test_name, device_obj.nvme_bdev)
     if not ret:
         logger.error(f"Failed to create bdev: {test_name}")
-
-        return False
+        return False
 
     alceml_id = device_obj.get_id()
     alceml_name = get_alceml_name(alceml_id)
     logger.info(f"adding {alceml_name}")
-    ret = rpc_client.bdev_alceml_create(alceml_name, test_name, alceml_id, pba_init_mode=2,
-                                        dev_cpu_mask=snode.dev_cpu_mask)
+    ret = rpc_client.bdev_alceml_create(alceml_name, test_name, alceml_id, pba_init_mode=2)
     if not ret:
         logger.error(f"Failed to create alceml bdev: {alceml_name}")
-
-        return False
+        return False
 
     # add pass through
     pt_name = f"{alceml_name}_PT"
     ret = rpc_client.bdev_PT_NoExcl_create(pt_name, alceml_name)
     if not ret:
         logger.error(f"Failed to create pt noexcl bdev: {pt_name}")
-
-        return False
+        return False
 
     subsystem_nqn = snode.subsystem + ":dev:" + alceml_id
     logger.info("Creating subsystem %s", subsystem_nqn)
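The device stack is layered bottom-up over SPDK RPC: a passtest bdev on the raw NVMe bdev, an alceml bdev on the passtest, a non-exclusive pass-through (PT) bdev, then an NVMf subsystem namespace; 1.2.5 also drops the `dev_cpu_mask` keyword from `bdev_alceml_create`. A minimal sketch of the ordering, assuming the `RPCClient` methods exactly as they appear in this hunk (the `test_name` naming here is hypothetical):

```python
from simplyblock_core.rpc_client import RPCClient

def build_device_stack(rpc: RPCClient, nvme_bdev: str, alceml_id: str) -> bool:
    """Sketch of the 1.2.5 bdev layering; error handling abbreviated."""
    test_name = f"{nvme_bdev}_test"  # hypothetical naming
    if not rpc.bdev_passtest_create(test_name, nvme_bdev):
        return False
    alceml_name = f"alceml_{alceml_id}"
    # dev_cpu_mask is no longer passed in 1.2.5
    if not rpc.bdev_alceml_create(alceml_name, test_name, alceml_id, pba_init_mode=2):
        return False
    pt_name = f"{alceml_name}_PT"
    return bool(rpc.bdev_PT_NoExcl_create(pt_name, alceml_name))
```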
@@ -148,13 +137,11 @@ def _def_create_device_stack(device_obj, snode, force=False):
     logger.info(f"Adding {pt_name} to the subsystem")
     ret = rpc_client.nvmf_subsystem_add_ns(subsystem_nqn, pt_name)
 
-    if
-        ret = rpc_client.bdev_jm_create(device_obj.jm_bdev, device_obj.alceml_bdev,
-                                        dev_cpu_mask=snode.dev_cpu_mask)
+    if device_obj.jm_bdev:
+        ret = rpc_client.bdev_jm_create(device_obj.jm_bdev, device_obj.alceml_bdev)
         if not ret:
-            logger.error(f"Failed to create
-
-            return False
+            logger.error(f"Failed to create bdev: {device_obj.jm_bdev}")
+            return False
 
     device_obj.testing_bdev = test_name
     device_obj.alceml_bdev = alceml_name
@@ -187,15 +174,23 @@ def restart_device(device_id, force=False):
             device_obj = dev
             break
 
+    device_obj.status = 'restarting'
+    snode.write_to_db(db_controller.kv_store)
+
     logger.info(f"Restarting device {device_id}")
-    device_set_unavailable(device_id)
 
-    ret = _def_create_device_stack(device_obj, snode
+    ret = _def_create_device_stack(device_obj, snode)
 
     if not ret:
         logger.error("Failed to create device stack")
-
-
+        device_obj.status = NVMeDevice.STATUS_UNAVAILABLE
+        snode.write_to_db(db_controller.kv_store)
+        return False
+
+    device_obj.io_error = False
+    device_obj.retries_exhausted = False
+    device_obj.status = NVMeDevice.STATUS_ONLINE
+    snode.write_to_db(db_controller.kv_store)
 
     logger.info("Make other nodes connect to the device")
     snodes = db_controller.get_storage_nodes()
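`restart_device` now writes each state transition straight to the KV store ('restarting', then STATUS_ONLINE or STATUS_UNAVAILABLE) instead of going through the `device_set_unavailable`/`device_set_online` helpers, so a failed stack rebuild can no longer end with the device reported online. A hedged condensation of the new flow, names as in the diff:

```python
def restart_and_persist(device_obj, snode, db_controller):
    """Sketch of the 1.2.5 restart flow: every transition is persisted,
    so monitors observe 'restarting' while the stack rebuilds."""
    device_obj.status = 'restarting'
    snode.write_to_db(db_controller.kv_store)

    if not _def_create_device_stack(device_obj, snode):
        device_obj.status = NVMeDevice.STATUS_UNAVAILABLE
        snode.write_to_db(db_controller.kv_store)
        return False

    device_obj.io_error = False
    device_obj.retries_exhausted = False
    device_obj.status = NVMeDevice.STATUS_ONLINE
    snode.write_to_db(db_controller.kv_store)
    return True
```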
@@ -226,11 +221,10 @@ def restart_device(device_id, force=False):
         node.write_to_db(db_controller.kv_store)
         time.sleep(3)
 
-    logger.info("
-
-    logger.info("Setting device online")
-    device_set_online(device_id)
+    logger.info("Sending device event")
+    distr_controller.send_dev_status_event(device_obj.cluster_device_order, "online")
     device_events.device_restarted(device_obj)
+
     return "Done"
 
 
@@ -273,8 +267,15 @@ def device_remove(device_id, force=True):
             device = dev
             break
 
+    if device.jm_bdev:
+        if snode.lvols:
+            logger.error(f"Failed to remove device: {device.get_id()}, "
+                         f"there are LVols that uses JM from this device, delete LVol to continue")
+            # if not force:
+            return False
+
     logger.info("Sending device event")
-    distr_controller.send_dev_status_event(device,
+    distr_controller.send_dev_status_event(device.cluster_device_order, "removed")
 
     logger.info("Disconnecting device from all nodes")
     distr_controller.disconnect_device(device)
@@ -290,6 +291,13 @@ def device_remove(device_id, force=True):
         if not force:
             return False
 
+    if device.jm_bdev:
+        ret = rpc_client.bdev_jm_delete(f"jm_{snode.get_id()}")
+        if not ret:
+            logger.error(f"Failed to remove journal manager: jm_{snode.get_id()}")
+            if not force:
+                return False
+
     logger.info("Removing device bdevs")
     ret = rpc_client.bdev_PT_NoExcl_delete(f"{device.alceml_bdev}_PT")
     if not ret:
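These two hunks add journal-manager (JM) handling to `device_remove`: a device that carries the node's JM bdev cannot be removed while the node still hosts LVols, and on removal the JM bdev, named `jm_{node_id}`, is torn down explicitly. A hedged sketch of the guard, assuming the model fields shown in this diff:

```python
def can_remove_device(device, snode) -> bool:
    """Sketch of the 1.2.5 JM guard: refuse removal while LVols
    still depend on this device's journal manager."""
    if device.jm_bdev and snode.lvols:
        return False  # delete the LVols first
    return True
```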
@@ -397,12 +405,19 @@ def get_device_iostats(device_id, history, records_count=20, parse_sizes=True):
 
 def reset_storage_device(dev_id):
     db_controller = DBController()
-    device =
+    device = None
+    snode = None
+    for node in db_controller.get_storage_nodes():
+        for dev in node.nvme_devices:
+            if dev.get_id() == dev_id:
+                device = dev
+                snode = node
+                break
+
     if not device:
         logger.error(f"Device not found: {dev_id}")
         return False
 
-    snode = db_controller.get_storage_node_by_id(device.node_id)
     if not snode:
         logger.error(f"Node not found {device.node_id}")
         return False
@@ -411,38 +426,30 @@ def reset_storage_device(dev_id):
         logger.error(f"Device status: {device.status} is removed")
         return False
 
-    logger.info("Setting
-
-
-
-
-
-        if dev.status == NVMeDevice.STATUS_ONLINE and dev.physical_label == device.physical_label:
-            devs.append(dev)
-            device_set_unavailable(dev.get_id())
+    logger.info("Setting device to unavailable")
+    old_status = device.status
+    device.status = NVMeDevice.STATUS_UNAVAILABLE
+    distr_controller.send_dev_status_event(device.cluster_device_order, device.status)
+    snode.write_to_db(db_controller.kv_store)
+    device_events.device_status_change(device, device.status, old_status)
 
     logger.info("Resetting device")
     rpc_client = RPCClient(
         snode.mgmt_ip, snode.rpc_port,
         snode.rpc_username, snode.rpc_password)
 
-    controller_name = device.
+    controller_name = device.nvme_bdev[:-2]
     response = rpc_client.reset_device(controller_name)
     if not response:
         logger.error(f"Failed to reset NVMe BDev {controller_name}")
         return False
-    time.sleep(3)
 
-
-
-
+    device.io_error = False
+    device.retries_exhausted = False
+    snode.write_to_db(db_controller.kv_store)
 
-    # set io_error flag False
-    device_set_io_error(dev_id, False)
-    device_set_retries_exhausted(dev_id, False)
-    # set device to online
-    device_set_online(dev_id)
     device_events.device_reset(device)
+    device_set_online(dev_id)
     return True
 
 
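`reset_storage_device` now locates the device by walking every node's `nvme_devices` list instead of trusting `device.node_id`, and it derives the controller name by stripping the namespace suffix from the bdev name (an SPDK NVMe bdev such as `nvme_1n1` belongs to controller `nvme_1`, hence the `[:-2]`). A hedged sketch of the lookup idiom, using the `DBController` API visible in this diff:

```python
from simplyblock_core.kv_store import DBController

def find_device_and_node(dev_id: str):
    """Sketch of the 1.2.5 lookup idiom: scan all nodes for the device."""
    db = DBController()
    for node in db.get_storage_nodes():
        for dev in node.nvme_devices:
            if dev.get_id() == dev_id:
                return dev, node
    return None, None

# e.g. an SPDK bdev name "nvme_1n1" -> controller "nvme_1"
# controller_name = device.nvme_bdev[:-2]
```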
simplyblock_core/controllers/events_controller.py

@@ -40,7 +40,7 @@ def log_distr_event(cluster_id, node_id, event_dict):
     ds.uuid = str(uuid.uuid4())
     ds.cluster_uuid = cluster_id
     ds.node_id = node_id
-    ds.date =
+    ds.date = int(time.time())
     ds.domain = DOMAIN_DISTR
     ds.event_level = EventObj.LEVEL_ERROR
     ds.caused_by = CAUSED_BY_MONITOR
@@ -66,7 +66,7 @@ def log_distr_event(cluster_id, node_id, event_dict):
 
 
 def log_event_cluster(cluster_id, domain, event, db_object, caused_by, message,
-                      node_id=None, event_level=EventObj.LEVEL_INFO
+                      node_id=None, event_level=EventObj.LEVEL_INFO):
     """
     uuid:
     cluster_uuid: 1234
@@ -83,7 +83,7 @@ def log_event_cluster(cluster_id, domain, event, db_object, caused_by, message,
     ds = EventObj()
     ds.uuid = str(uuid.uuid4())
     ds.cluster_uuid = cluster_id
-    ds.date =
+    ds.date = int(time.time())
     ds.node_id = node_id
     ds.event_level = event_level
 
@@ -93,14 +93,12 @@ def log_event_cluster(cluster_id, domain, event, db_object, caused_by, message,
     ds.object_dict = db_object.get_clean_dict()
     ds.caused_by = caused_by
     ds.message = message
-    ds.status = status
 
     log_event_based_on_level(cluster_id, event, db_object.name, message, caused_by, event_level)
 
     db_controller = DBController()
     ds.write_to_db(db_controller.kv_store)
 
-
 def log_event_based_on_level(cluster_id, event, db_object, message, caused_by, event_level):
     json_str = json.dumps({
         "cluster_id": cluster_id,
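Two changes to the event records: `ds.date` is now a whole-second epoch stamp, `int(time.time())`, and `log_event_cluster` no longer takes or stores a `status` field. A hedged call sketch against the 1.2.5 signature; the domain and event strings are illustrative assumptions, not constants from the package:

```python
from simplyblock_core.kv_store import DBController
from simplyblock_core.controllers import events_controller

db = DBController()
cluster = db.get_clusters()[0]  # single-cluster assumption, as elsewhere in 1.2.5

# 1.2.5 signature: no `status` argument; level defaults to LEVEL_INFO.
events_controller.log_event_cluster(
    cluster_id=cluster.get_id(),
    domain="cluster",              # assumed domain string
    event="status_change",         # illustrative event name
    db_object=cluster,             # any model with get_clean_dict() and .name
    caused_by="cli",               # illustrative
    message="cluster status changed",
)
```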
simplyblock_core/controllers/health_controller.py

@@ -16,13 +16,11 @@ logger = log.getLogger()
 
 def check_cluster(cluster_id):
     db_controller = DBController()
-    st = db_controller.
+    st = db_controller.get_storage_nodes()
     data = []
-    result = True
     for node in st:
         # check if node is online, unavailable, restarting
         ret = check_node(node.get_id(), with_devices=False)
-        result &= ret
         print("*"*100)
         data.append({
             "Kind": "Node",
@@ -30,32 +28,30 @@ def check_cluster(cluster_id):
             "Status": "ok" if ret else "failed"
         })
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            "Status": "ok" if ret else "failed"
-        })
+    for device in db_controller.get_storage_devices():
+        ret = check_device(device.get_id())
+        print("*" * 100)
+        data.append({
+            "Kind": "Device",
+            "UUID": device.get_id(),
+            "Status": "ok" if ret else "failed"
+        })
+
+    for lvol in db_controller.get_lvols():
+        ret = check_lvol(lvol.get_id())
+        print("*" * 100)
+        data.append({
+            "Kind": "LVol",
+            "UUID": lvol.get_id(),
+            "Status": "ok" if ret else "failed"
+        })
     print(utils.print_table(data))
-    return
+    return True
 
 
 def _check_node_docker_api(ip):
     try:
-        node_docker = docker.DockerClient(base_url=f"tcp://{ip}:2375", version="auto"
+        node_docker = docker.DockerClient(base_url=f"tcp://{ip}:2375", version="auto")
         ret = node_docker.info()
         if ret:
             logger.debug(ret)
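`check_cluster` now reports nodes, devices, and LVols in a single table, and with the `result` accumulator removed it returns `True` unconditionally, so callers should read the table rather than the return value. A hedged sketch of the row-building pattern, using the check functions named in this hunk:

```python
def health_rows(db_controller):
    """Sketch of the 1.2.5 check_cluster reporting pattern."""
    rows = []
    for node in db_controller.get_storage_nodes():
        ok = check_node(node.get_id(), with_devices=False)
        rows.append({"Kind": "Node", "UUID": node.get_id(),
                     "Status": "ok" if ok else "failed"})
    for device in db_controller.get_storage_devices():
        rows.append({"Kind": "Device", "UUID": device.get_id(),
                     "Status": "ok" if check_device(device.get_id()) else "failed"})
    for lvol in db_controller.get_lvols():
        rows.append({"Kind": "LVol", "UUID": lvol.get_id(),
                     "Status": "ok" if check_lvol(lvol.get_id()) else "failed"})
    return rows
```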
@@ -69,7 +65,7 @@ def _check_node_rpc(rpc_ip, rpc_port, rpc_username, rpc_password):
     try:
         rpc_client = RPCClient(
             rpc_ip, rpc_port, rpc_username, rpc_password,
-            timeout=
+            timeout=5, retry=3)
         ret = rpc_client.get_version()
         if ret:
             logger.debug(f"SPDK version: {ret['version']}")
@@ -171,7 +167,7 @@ def check_node(node_id, with_devices=True):
 
 def check_device(device_id):
     db_controller = DBController()
-    device = db_controller.
+    device = db_controller.get_storage_devices(device_id)
     if not device:
         logger.error("device not found")
         return False
@@ -196,8 +192,8 @@ def check_device(device_id):
         snode.rpc_username, snode.rpc_password)
 
     bdevs_stack = [device.nvme_bdev, device.testing_bdev, device.alceml_bdev, device.pt_bdev]
-
-
+    if device.jm_bdev:
+        bdevs_stack.append(device.jm_bdev)
     logger.info(f"Checking Device: {device_id}, status:{device.status}")
     problems = 0
     for bdev in bdevs_stack:
@@ -235,7 +231,7 @@ def check_device(device_id):
 
 def check_remote_device(device_id):
     db_controller = DBController()
-    device = db_controller.
+    device = db_controller.get_storage_devices(device_id)
     if not device:
         logger.error("device not found")
         return False
@@ -245,7 +241,7 @@ def check_remote_device(device_id):
         return False
 
     result = True
-    for node in db_controller.
+    for node in db_controller.get_storage_nodes():
         if node.status == StorageNode.STATUS_ONLINE:
             if node.get_id() == snode.get_id():
                 continue
@@ -310,6 +306,20 @@ def check_lvol_on_node(lvol_id, node_id):
         logger.exception(e)
         return False
 
+    # check ndcs+npcs <= online devices
+    # then change its status to offline if fails this check
+    online_devices = 0
+    for node in db_controller.get_storage_nodes():
+        for dev in node.nvme_devices:
+            if dev.status == dev.STATUS_ONLINE:
+                online_devices += 1
+
+    # if lvol.ndcs + lvol.npcs < online_devices:
+    #     logger.info(f"Checking Distr ndcs+npcs: {lvol.ndcs}+{lvol.npcs}, online devices: {online_devices} ... ok")
+    # else:
+    #     logger.info(f"Checking Distr ndcs+npcs: {lvol.ndcs}+{lvol.npcs}, online devices: {online_devices} ... failed")
+    #     passed = False
+
     return passed
 
 
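The commented-out check compares an LVol's distribution scheme with cluster capacity: the distrib bdev's `ndcs` (data chunks) plus `npcs` (parity chunks) must fit on distinct online devices, so a 2+1 scheme (`ndcs=2`, `npcs=1`) needs at least 3 online devices. A hedged restatement of the check the comments sketch:

```python
def lvol_placement_ok(lvol, online_devices: int) -> bool:
    """Each stripe needs ndcs data chunks + npcs parity chunks on
    distinct online devices (sketch of the commented-out 1.2.5 check)."""
    return lvol.ndcs + lvol.npcs <= online_devices
```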
simplyblock_core/controllers/lvol_controller.py

@@ -125,7 +125,7 @@ def validate_add_lvol_func(name, size, host_id_or_name, pool_id_or_name,
         return False, f"Invalid LVol size: {utils.humanbytes(size)} " \
                       f"Pool max size has reached {utils.humanbytes(total)} of {utils.humanbytes(pool.pool_max_size)}"
 
-    for lvol in db_controller.get_lvols(
+    for lvol in db_controller.get_lvols():
         if lvol.pool_uuid == pool.get_id():
             if lvol.lvol_name == name:
                 return False, f"LVol name must be unique: {name}"
@@ -167,7 +167,7 @@ def validate_add_lvol_func(name, size, host_id_or_name, pool_id_or_name,
 
 
 def get_jm_names(snode):
-    return [snode.
+    return [f"jm_{snode.get_id()}"]
 
 
 # Deprecated
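`get_jm_names` now derives the JM bdev name from the node UUID rather than reading it off the node object, matching the `jm_{snode.get_id()}` string used in `device_controller.device_remove` above:

```python
# 1.2.5 convention: a node's single journal-manager bdev is named
# after its UUID, e.g. node id "9c2154f6" (hypothetical) -> "jm_9c2154f6".
jm_bdev = f"jm_{snode.get_id()}"
```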
@@ -218,7 +218,7 @@ def add_lvol(name, size, host_id_or_name, pool_id_or_name, use_comp, use_crypto,
         return False, "Storage node has no nvme devices"
 
     dev_count = 0
-    for node in db_controller.
+    for node in db_controller.get_storage_nodes():
         if node.status == node.STATUS_ONLINE:
             for dev in node.nvme_devices:
                 if dev.status == dev.STATUS_ONLINE:
@@ -252,7 +252,7 @@ def add_lvol(name, size, host_id_or_name, pool_id_or_name, use_comp, use_crypto,
         distr_npcs = 1
     else:
         node_count = 0
-        for node in db_controller.
+        for node in db_controller.get_storage_nodes():
             if node.status == node.STATUS_ONLINE:
                 node_count += 1
         if node_count == 3:
@@ -265,7 +265,7 @@ def add_lvol(name, size, host_id_or_name, pool_id_or_name, use_comp, use_crypto,
 
     # name, vuid, ndcs, npcs, num_blocks, block_size, alloc_names
     ret = rpc_client.bdev_distrib_create(f"distr_{name}", vuid, distr_ndcs, distr_npcs, num_blocks, distr_bs, jm_names,
-                                         distr_chunk_bs
+                                         distr_chunk_bs)
     bdev_stack.append({"type": "distr", "name": f"distr_{name}"})
     if not ret:
         logger.error("failed to create Distr bdev")
@@ -364,8 +364,8 @@ def add_lvol(name, size, host_id_or_name, pool_id_or_name, use_comp, use_crypto,
     return lvol_id, None
 
 
-def _get_next_3_nodes(
-    snodes = db_controller.
+def _get_next_3_nodes():
+    snodes = db_controller.get_storage_nodes()
     online_nodes = []
     node_stats = {}
     for node in snodes:
@@ -461,7 +461,6 @@ def validate_aes_xts_keys(key1: str, key2: str) -> Tuple[bool, str]:
 
     return True, ""
 
-
 def add_lvol_ha(name, size, host_id_or_name, ha_type, pool_id_or_name, use_comp, use_crypto,
                 distr_vuid, distr_ndcs, distr_npcs,
                 max_rw_iops, max_rw_mbytes, max_r_mbytes, max_w_mbytes,
@@ -484,12 +483,12 @@ def add_lvol_ha(name, size, host_id_or_name, ha_type, pool_id_or_name, use_comp,
     if not pool:
         return False, f"Pool not found: {pool_id_or_name}"
 
-    cl = db_controller.
+    cl = db_controller.get_clusters()[0]
     if cl.status not in [cl.STATUS_ACTIVE, cl.STATUS_DEGRADED]:
         return False, f"Cluster is not active, status: {cl.status}"
 
     if ha_type == "default":
-        ha_type =
+        ha_type = cl.ha_type
 
     max_rw_iops = max_rw_iops or 0
     max_rw_mbytes = max_rw_mbytes or 0
@@ -503,13 +502,8 @@ def add_lvol_ha(name, size, host_id_or_name, ha_type, pool_id_or_name, use_comp,
         logger.error(error)
         return False, error
 
-    cluster_size_prov = 0
-    cluster_size_total = 0
-    for lvol in db_controller.get_lvols(cl.get_id()):
-        cluster_size_prov += lvol.size
-
     dev_count = 0
-    snodes = db_controller.
+    snodes = db_controller.get_storage_nodes()
     online_nodes = []
     for node in snodes:
         if node.status == node.STATUS_ONLINE:
@@ -517,11 +511,6 @@ def add_lvol_ha(name, size, host_id_or_name, ha_type, pool_id_or_name, use_comp,
             for dev in node.nvme_devices:
                 if dev.status == dev.STATUS_ONLINE:
                     dev_count += 1
-                    cluster_size_total += dev.size
-
-    if len(online_nodes) == 0:
-        logger.error("No online Storage nodes found")
-        return False, "No online Storage nodes found"
 
     if dev_count == 0:
         logger.error("No NVMe devices found in the cluster")
@@ -534,6 +523,19 @@ def add_lvol_ha(name, size, host_id_or_name, ha_type, pool_id_or_name, use_comp,
         logger.error("Storage nodes are less than 3 in ha cluster")
         return False, "Storage nodes are less than 3 in ha cluster"
 
+    if len(online_nodes) == 0:
+        logger.error("No online Storage nodes found")
+        return False, "No online Storage nodes found"
+
+    cluster_size_prov = 0
+    cluster_size_total = 0
+    for lvol in db_controller.get_lvols():
+        cluster_size_prov += lvol.size
+
+    for dev in db_controller.get_storage_devices():
+        if dev.status == NVMeDevice.STATUS_ONLINE:
+            cluster_size_total += dev.size
+
     cluster_size_prov_util = int(((cluster_size_prov+size) / cluster_size_total) * 100)
 
     if cl.prov_cap_crit and cl.prov_cap_crit < cluster_size_prov_util:
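The provisioning guard compares promised capacity against physical capacity: `cluster_size_prov` sums the sizes of all existing LVols, `cluster_size_total` sums all online devices, and the new LVol is admitted only if `(prov + size) / total` stays under the cluster's critical threshold. For example, with 9 TB already provisioned on 10 TB of online devices and `prov_cap_crit = 150`, a 7 TB LVol would push utilization to (9 + 7) / 10 = 160% and be rejected. A hedged restatement of the arithmetic:

```python
def prov_cap_util_pct(provisioned: int, new_size: int, total_online: int) -> int:
    """Thin-provisioning utilization as computed by the 1.2.5 guard (sketch)."""
    return int(((provisioned + new_size) / total_online) * 100)

assert prov_cap_util_pct(9_000, 7_000, 10_000) == 160  # > prov_cap_crit of 150 -> reject
```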
@@ -695,7 +697,8 @@ def add_lvol_ha(name, size, host_id_or_name, ha_type, pool_id_or_name, use_comp,
         lvol.lvol_type += ',compress'
         lvol.top_bdev = lvol.comp_bdev
 
-    nodes = _get_next_3_nodes(
+    nodes = _get_next_3_nodes()
+
     if host_node:
         nodes.insert(0, host_node)
     else:
@@ -758,7 +761,6 @@ def _create_bdev_stack(lvol, snode, ha_comm_addrs, ha_inode_self):
         params['jm_names'] = get_jm_names(snode)
         params['ha_comm_addrs'] = ha_comm_addrs
         params['ha_inode_self'] = ha_inode_self
-        params['dev_cpu_mask'] = snode.dev_cpu_mask
     ret = rpc_client.bdev_distrib_create(**params)
     if ret:
         ret = distr_controller.send_cluster_map_to_node(snode)
@@ -928,7 +930,7 @@ def delete_lvol_from_node(lvol_id, node_id, clear_data=True):
     # 3- clear alceml devices
     if clear_data:
         logger.info(f"Clearing Alceml devices")
-        for node in db_controller.
+        for node in db_controller.get_storage_nodes():
             if node.status == StorageNode.STATUS_ONLINE:
                 rpc_node = RPCClient(node.mgmt_ip, node.rpc_port, node.rpc_username, node.rpc_password)
                 for dev in node.nvme_devices:
@@ -939,7 +941,11 @@ def delete_lvol_from_node(lvol_id, node_id, clear_data=True):
     lvol.write_to_db(db_controller.kv_store)
 
     # 4- clear JM
-    jm_device =
+    jm_device = None
+    for dev in snode.nvme_devices:
+        if dev.status == NVMeDevice.STATUS_JM:
+            jm_device = dev
+            break
     ret = rpc_client.alceml_unmap_vuid(jm_device.alceml_bdev, lvol.vuid)
     if not ret:
         logger.error(f"Failed to unmap jm alceml {jm_device.alceml_bdev} with vuid {lvol.vuid}")
@@ -1015,7 +1021,7 @@ def delete_lvol(id_or_name, force_delete=False):
         snap = db_controller.get_snapshot_by_id(lvol.cloned_from_snap)
         if snap.deleted is True:
             lvols_count = 0
-            for lvol in db_controller.get_lvols():
+            for lvol in db_controller.get_lvols():
                 if lvol.cloned_from_snap == snap.get_id():
                     lvols_count += 1
             if lvols_count == 0:
@@ -1076,20 +1082,8 @@ def set_lvol(uuid, max_rw_iops, max_rw_mbytes, max_r_mbytes, max_w_mbytes, name=
     return True
 
 
-def list_lvols(is_json
-    lvols =
-    if cluster_id:
-        lvols = db_controller.get_lvols(cluster_id)
-    elif pool_id_or_name:
-        pool = db_controller.get_pool_by_id(pool_id_or_name)
-        if not pool:
-            pool = db_controller.get_pool_by_name(pool_id_or_name)
-        if pool:
-            for lv_id in pool.lvols:
-                lvols.append(db_controller.get_lvol_by_id(lv_id))
-    else:
-        lvols = db_controller.get_lvols()
-
+def list_lvols(is_json):
+    lvols = db_controller.get_lvols()
     data = []
     for lvol in lvols:
         if lvol.deleted is True:
@@ -1140,7 +1134,7 @@ def list_lvols_mem(is_json, is_csv):
 
 def get_lvol(lvol_id_or_name, is_json):
     lvol = None
-    for lv in db_controller.get_lvols():
+    for lv in db_controller.get_lvols():
         if lv.get_id() == lvol_id_or_name or lv.lvol_name == lvol_id_or_name:
             lvol = lv
             break
@@ -1354,7 +1348,7 @@ def get_cluster_map(lvol_id):
     if not ret:
         logger.error(f"Failed to get LVol cluster map: {lvol_id}")
         return False
-    logger.
+    logger.info(ret)
     print("*"*100)
     results, is_passed = distr_controller.parse_distr_cluster_map(ret)
     return utils.print_table(results)
simplyblock_core/controllers/pool_controller.py

@@ -23,7 +23,7 @@ def _generate_string(length):
         string.ascii_letters + string.digits) for _ in range(length))
 
 
-def add_pool(name, pool_max, lvol_max, max_rw_iops, max_rw_mbytes, max_r_mbytes, max_w_mbytes, has_secret
+def add_pool(name, pool_max, lvol_max, max_rw_iops, max_rw_mbytes, max_r_mbytes, max_w_mbytes, has_secret):
 
     if not name:
         logger.error("Pool name is empty!")
@@ -34,11 +34,6 @@ def add_pool(name, pool_max, lvol_max, max_rw_iops, max_rw_mbytes, max_r_mbytes,
         logger.error(f"Pool found with the same name: {name}")
         return False
 
-    cluster = db_controller.get_cluster_by_id(cluster_id)
-    if not cluster:
-        logger.error(f"Cluster not found: {cluster_id}")
-        return False
-
     pool_max = pool_max or 0
     lvol_max = lvol_max or 0
     max_rw_iops = max_rw_iops or 0
@@ -51,6 +46,7 @@ def add_pool(name, pool_max, lvol_max, max_rw_iops, max_rw_mbytes, max_r_mbytes,
         logger.error("max_rw_mbytes must be greater than max_w_mbytes and max_r_mbytes")
         return False
 
+    cluster = db_controller.get_clusters()[0]
     logger.info("Adding pool")
     pool = Pool()
     pool.id = str(uuid.uuid4())
@@ -140,8 +136,8 @@ def delete_pool(uuid):
     return True
 
 
-def list_pools(is_json
-    pools = db_controller.get_pools(
+def list_pools(is_json):
+    pools = db_controller.get_pools()
     data = []
     for pool in pools:
         data.append({