sbcli-pre 1.0.9__zip → 1.1.1__zip
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/PKG-INFO +1 -1
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/env_var +1 -1
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/sbcli_pre.egg-info/PKG-INFO +1 -1
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_cli/cli.py +39 -68
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/constants.py +1 -1
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/controllers/device_controller.py +4 -2
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/controllers/lvol_controller.py +2 -1
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/controllers/snapshot_controller.py +3 -2
- sbcli_pre-1.1.1/simplyblock_core/controllers/tasks_controller.py +70 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/models/storage_node.py +4 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/rpc_client.py +29 -3
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/services/tasks_runner_migration.py +1 -1
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/services/tasks_runner_restart.py +41 -14
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/storage_node_ops.py +89 -11
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/utils.py +46 -1
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/blueprints/snode_ops.py +1 -14
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/blueprints/web_api_storage_node.py +1 -9
- sbcli_pre-1.0.9/simplyblock_core/controllers/tasks_controller.py +0 -54
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/README.md +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/pyproject.toml +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/sbcli_pre.egg-info/SOURCES.txt +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/sbcli_pre.egg-info/dependency_links.txt +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/sbcli_pre.egg-info/entry_points.txt +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/sbcli_pre.egg-info/requires.txt +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/sbcli_pre.egg-info/top_level.txt +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/setup.cfg +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/setup.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_cli/main.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/__init__.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/cluster_ops.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/cnode_client.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/compute_node_ops.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/controllers/__init__.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/controllers/caching_node_controller.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/controllers/cluster_events.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/controllers/device_events.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/controllers/events_controller.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/controllers/health_controller.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/controllers/lvol_events.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/controllers/mgmt_events.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/controllers/pool_controller.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/controllers/pool_events.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/controllers/snapshot_events.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/controllers/storage_events.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/controllers/tasks_events.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/distr_controller.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/kv_store.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/mgmt_node_ops.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/models/__init__.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/models/base_model.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/models/caching_node.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/models/cluster.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/models/compute_node.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/models/events.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/models/global_settings.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/models/iface.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/models/job_schedule.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/models/lvol_model.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/models/mgmt_node.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/models/nvme_device.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/models/pool.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/models/port_stat.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/models/snapshot.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/models/stats.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/pci_utils.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/__init__.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/alerting/alert_resources.yaml +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/alerting/alert_rules.yaml +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/clean_local_storage_deploy.sh +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/config_docker.sh +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/dashboards/cluster.json +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/dashboards/devices.json +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/dashboards/lvols.json +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/dashboards/node-exporter.json +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/dashboards/nodes.json +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/dashboards/pools.json +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/datasource.yml +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/db_config_double.sh +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/db_config_single.sh +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/deploy_stack.sh +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/docker-compose-swarm-monitoring.yml +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/docker-compose-swarm.yml +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/haproxy.cfg +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/install_deps.sh +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/prometheus.yml +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/run_ssh.sh +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/set_db_config.sh +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/scripts/stack_deploy_wait.sh +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/services/__init__.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/services/caching_node_monitor.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/services/cap_monitor.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/services/capacity_and_stats_collector.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/services/device_monitor.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/services/distr_event_collector.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/services/health_check_service.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/services/install_service.sh +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/services/log_agg_service.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/services/lvol_monitor.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/services/lvol_stat_collector.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/services/mgmt_node_monitor.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/services/port_stat_collector.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/services/remove_service.sh +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/services/service_template.service +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/services/storage_node_monitor.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/shell_utils.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_core/snode_client.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/__init__.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/app.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/auth_middleware.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/blueprints/__init__.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/blueprints/caching_node_ops.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/blueprints/caching_node_ops_k8s.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/blueprints/node_api_basic.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/blueprints/node_api_caching_docker.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/blueprints/node_api_caching_ks.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/blueprints/web_api_caching_node.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/blueprints/web_api_cluster.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/blueprints/web_api_device.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/blueprints/web_api_lvol.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/blueprints/web_api_mgmt_node.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/blueprints/web_api_pool.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/blueprints/web_api_snapshot.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/caching_node_app.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/caching_node_app_k8s.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/node_utils.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/node_webapp.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/snode_app.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/static/delete.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/static/deploy.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/static/deploy_cnode.yaml +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/static/deploy_spdk.yaml +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/static/is_up.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/static/list_deps.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/static/rpac.yaml +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/static/tst.py +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/templates/deploy_spdk.yaml.j2 +0 -0
- {sbcli_pre-1.0.9 → sbcli_pre-1.1.1}/simplyblock_web/utils.py +0 -0
simplyblock_cli/cli.py

@@ -42,8 +42,6 @@ class CLIWrapper:
         sub_command.add_argument("--jm-percent", help='Number in percent to use for JM from each device',
                                  type=int, default=3, dest='jm_percent')
         sub_command.add_argument("--data-nics", help='Data interface names', nargs='+', dest='data_nics')
-        sub_command.add_argument("--cpu-mask", help='SPDK app CPU mask, default is all cores found',
-                                 dest='spdk_cpu_mask')
         sub_command.add_argument("--memory", help='SPDK huge memory allocation, default is 4G', dest='spdk_mem')
         sub_command.add_argument("--spdk-image", help='SPDK image uri', dest='spdk_image')
         sub_command.add_argument("--spdk-debug", help='Enable spdk debug logs', dest='spdk_debug', required=False, action='store_true')
@@ -57,10 +55,6 @@ class CLIWrapper:
         sub_command = self.add_sub_command(subparser, "delete", 'Delete storage node obj')
         sub_command.add_argument("node_id", help='UUID of storage node')

-        # remove storage node
-        sub_command = self.add_sub_command(subparser, "delete", 'Delete storage node')
-        sub_command.add_argument("node_id", help='UUID of storage node')
-
         # remove storage node
         sub_command = self.add_sub_command(subparser, "remove", 'Remove storage node')
         sub_command.add_argument("node_id", help='UUID of storage node')
@@ -78,11 +72,10 @@ class CLIWrapper:

         # Restart storage node
         sub_command = self.add_sub_command(
-            subparser, "restart", 'Restart a storage node
+            subparser, "restart", 'Restart a storage node', usage='All functions and device drivers will be reset. '
                                   'During restart, the node does not accept IO. In a high-availability setup, '
-                                  'this will not impact operations
+                                  'this will not impact operations')
         sub_command.add_argument("node_id", help='UUID of storage node')
-        sub_command.add_argument("--cpu-mask", help='SPDK app CPU mask, default is all cores found', dest='spdk_cpu_mask')
         sub_command.add_argument("--memory", help='SPDK huge memory allocation, default is 4G', dest='spdk_mem')
         sub_command.add_argument("--spdk-image", help='SPDK image uri', dest='spdk_image')
         sub_command.add_argument("--spdk-debug", help='Enable spdk debug logs', dest='spdk_debug', required=False, action='store_true')
@@ -96,7 +89,7 @@ class CLIWrapper:

         # Shutdown storage node
         sub_command = self.add_sub_command(
-            subparser, "shutdown", 'Shutdown a storage node
+            subparser, "shutdown", 'Shutdown a storage node', usage='Once the command is issued, the node will stop accepting '
                                    'IO,but IO, which was previously received, will still be processed. '
                                    'In a high-availability setup, this will not impact operations.')
         sub_command.add_argument("node_id", help='UUID of storage node')
@@ -104,7 +97,7 @@ class CLIWrapper:

         # Suspend storage node
         sub_command = self.add_sub_command(
-            subparser, "suspend", 'Suspend a storage node
+            subparser, "suspend", 'Suspend a storage node', usage='The node will stop accepting new IO, but will finish '
                                   'processing any IO, which has been received already.')
         sub_command.add_argument("node_id", help='UUID of storage node')
         sub_command.add_argument("--force", help='Force node suspend', required=False, action='store_true')
@@ -113,13 +106,13 @@ class CLIWrapper:
         sub_command = self.add_sub_command(subparser, "resume", 'Resume a storage node')
         sub_command.add_argument("node_id", help='UUID of storage node')

-        sub_command = self.add_sub_command(subparser, "get-io-stats", '
+        sub_command = self.add_sub_command(subparser, "get-io-stats", 'Get node IO statistics')
         sub_command.add_argument("node_id", help='Node ID')
         sub_command.add_argument("--history", help='list history records -one for every 15 minutes- '
                                                    'for XX days and YY hours -up to 10 days in total-, format: XXdYYh')

         sub_command = self.add_sub_command(
-            subparser, 'get-capacity', '
+            subparser, 'get-capacity', 'Get node capacity statistics')
         sub_command.add_argument("node_id", help='Node ID')
         sub_command.add_argument("--history", help='list history records -one for every 15 minutes- '
                                                    'for XX days and YY hours -up to 10 days in total-, format: XXdYYh')
@@ -132,7 +125,7 @@ class CLIWrapper:
         sub_command.add_argument(
             "--json", help='Print outputs in json format', required=False, action='store_true')

-        sub_command = self.add_sub_command(subparser, "device-testing-mode", '
+        sub_command = self.add_sub_command(subparser, "device-testing-mode", 'Set device testing mode')
         sub_command.add_argument("device_id", help='Device UUID')
         sub_command.add_argument("mode", help='Testing mode', choices=[
             'full_pass_through', 'io_error_on_read', 'io_error_on_write',
@@ -150,7 +143,7 @@ class CLIWrapper:
         sub_command.add_argument("device_id", help='the devices\'s UUID')

         # Reset storage device
-        sub_command = self.add_sub_command(subparser, "restart-device", '
+        sub_command = self.add_sub_command(subparser, "restart-device", 'Restart storage device',
                                            usage="a previously removed or unavailable device may be returned into "
                                                  "online state. If the device is not physically present, accessible "
                                                  "or healthy, it will flip back into unavailable state again.")
@@ -163,30 +156,29 @@ class CLIWrapper:
                                          "auto-rebalancing background process in which some cluster "
                                          "capacity is re-distributed to this newly added device.")
         sub_command = self.add_sub_command(
-            subparser, 'remove-device', 'Remove a storage device
+            subparser, 'remove-device', 'Remove a storage device', usage='The device will become unavailable, independently '
                                         'if it was physically removed from the server. This function can be used if '
                                         'auto-detection of removal did not work or if the device must be maintained '
                                         'otherwise while remaining inserted into the server. ')
         sub_command.add_argument("device_id", help='Storage device ID')
         sub_command.add_argument("--force", help='Force device remove', required=False, action='store_true')

-        sub_command = self.add_sub_command(
-
-
-
-
-
-
+        # sub_command = self.add_sub_command(
+        #     subparser, 'set-failed-device', 'Set storage device to failed state. ', usage='This command can be used, '
+        #                                     'if an administrator believes that the device must be changed, '
+        #                                     'but its status and health state do not lead to an automatic detection '
+        #                                     'of the failure state. Attention!!! The failed state is final, all data '
+        #                                     'on the device will be automatically recovered to other devices '
+        #                                     'in the cluster. ')

         sub_command = self.add_sub_command(
-            subparser, 'get-capacity-device', '
-                                              'the device in bytes')
+            subparser, 'get-capacity-device', 'Get device capacity')
         sub_command.add_argument("device_id", help='Storage device ID')
         sub_command.add_argument("--history", help='list history records -one for every 15 minutes- '
                                                    'for XX days and YY hours -up to 10 days in total-, format: XXdYYh')

         sub_command = self.add_sub_command(
-            subparser, 'get-io-stats-device', '
+            subparser, 'get-io-stats-device', 'Get device IO statistics')
         sub_command.add_argument("device_id", help='Storage device ID')
         sub_command.add_argument("--history", help='list history records -one for every 15 minutes- '
                                                    'for XX days and YY hours -up to 10 days in total-, format: XXdYYh')
@@ -278,36 +270,33 @@ class CLIWrapper:
         sub_command.add_argument("cluster_id", help='the cluster UUID')

         sub_command = self.add_sub_command(
-            subparser, 'get-capacity', '
-                                       '(in percent and absolute) and provisioned capacity (in percent and absolute) '
-                                       'in GB in the cluster.')
+            subparser, 'get-capacity', 'Get cluster capacity')
         sub_command.add_argument("cluster_id", help='the cluster UUID')
         sub_command.add_argument("--json", help='Print json output', required=False, action='store_true')
         sub_command.add_argument("--history", help='(XXdYYh), list history records (one for every 15 minutes) '
                                                    'for XX days and YY hours (up to 10 days in total).')

         sub_command = self.add_sub_command(
-            subparser, 'get-io-stats', '
+            subparser, 'get-io-stats', 'Get cluster IO statistics')
         sub_command.add_argument("cluster_id", help='the cluster UUID')
         sub_command.add_argument("--records", help='Number of records, default: 20', type=int, default=20)
         sub_command.add_argument("--history", help='(XXdYYh), list history records (one for every 15 minutes) '
                                                    'for XX days and YY hours (up to 10 days in total).')

-        sub_command = self.add_sub_command(
-
-        sub_command.add_argument("cluster_id", help='the cluster UUID')
+        # sub_command = self.add_sub_command(
+        #     subparser, 'get-cli-ssh-pass', 'returns the ssh password for the CLI ssh connection')
+        # sub_command.add_argument("cluster_id", help='the cluster UUID')

         # get-logs
         sub_command = self.add_sub_command(subparser, 'get-logs', 'Returns cluster status logs')
         sub_command.add_argument("cluster_id", help='cluster uuid')

         # get-secret
-        sub_command = self.add_sub_command(subparser, 'get-secret', '
+        sub_command = self.add_sub_command(subparser, 'get-secret', 'Get cluster secret')
         sub_command.add_argument("cluster_id", help='cluster uuid')

         # set-secret
-        sub_command = self.add_sub_command(subparser, 'upd-secret', 'Updates the secret
-                                           'one with a new one) and returns the new one.')
+        sub_command = self.add_sub_command(subparser, 'upd-secret', 'Updates the cluster secret')
         sub_command.add_argument("cluster_id", help='cluster uuid')
         sub_command.add_argument("secret", help='new 20 characters password')

@@ -404,7 +393,7 @@ class CLIWrapper:

         # delete lvol
         sub_command = self.add_sub_command(
-            subparser, 'delete', 'Delete LVol
+            subparser, 'delete', 'Delete LVol', usage='This is only possible, if no more snapshots and non-inflated clones '
                                  'of the volume exist. The volume must be suspended before it can be deleted. ')
         sub_command.add_argument("id", help='LVol id or ids', nargs='+')
         sub_command.add_argument("--force", help='Force delete LVol from the cluster', required=False,
@@ -412,13 +401,13 @@ class CLIWrapper:

         # show connection string
         sub_command = self.add_sub_command(
-            subparser, 'connect', '
+            subparser, 'connect', 'Get lvol connection strings', usage='Multiple connections to the cluster are '
                                   'always available for multi-pathing and high-availability.')
         sub_command.add_argument("id", help='LVol id')

         # lvol resize
         sub_command = self.add_sub_command(
-            subparser, 'resize', 'Resize LVol
+            subparser, 'resize', 'Resize LVol', usage='The lvol cannot be exceed the maximum size for lvols. It cannot '
                                  'exceed total remaining provisioned space in pool. It cannot drop below the '
                                  'current utilization.')
         sub_command.add_argument("id", help='LVol id')
@@ -445,23 +434,22 @@ class CLIWrapper:

         # lvol get-capacity
         sub_command = self.add_sub_command(
-            subparser, 'get-capacity',
-            '(in percent and absolute) capacity.')
+            subparser, 'get-capacity',"Get LVol capacity")
         sub_command.add_argument("id", help='LVol id')
         sub_command.add_argument("--history", help='(XXdYYh), list history records (one for every 15 minutes) '
                                                    'for XX days and YY hours (up to 10 days in total).')

         # lvol get-io-stats
         sub_command = self.add_sub_command(
-            subparser, 'get-io-stats', help="
+            subparser, 'get-io-stats', help="Get LVol IO statistics")
         sub_command.add_argument("id", help='LVol id')
         sub_command.add_argument("--history", help='(XXdYYh), list history records (one for every 15 minutes) '
                                                    'for XX days and YY hours (up to 10 days in total).')

-        sub_command = self.add_sub_command(subparser, 'send-cluster-map', 'send
+        sub_command = self.add_sub_command(subparser, 'send-cluster-map', 'send cluster map')
         sub_command.add_argument("id", help='LVol id')

-        sub_command = self.add_sub_command(subparser, 'get-cluster-map', 'get
+        sub_command = self.add_sub_command(subparser, 'get-cluster-map', 'get cluster map')
         sub_command.add_argument("id", help='LVol id')

         # check lvol
@@ -534,24 +522,21 @@ class CLIWrapper:
         sub_command.add_argument("pool_id", help='pool uuid')

         # get-secret
-        sub_command = self.add_sub_command(subparser, 'get-secret', '
+        sub_command = self.add_sub_command(subparser, 'get-secret', 'Get pool secret')
         sub_command.add_argument("pool_id", help='pool uuid')

         # get-secret
-        sub_command = self.add_sub_command(subparser, 'upd-secret', 'Updates
-                                           'one with a new one) and returns the new one.')
+        sub_command = self.add_sub_command(subparser, 'upd-secret', 'Updates pool secret')
         sub_command.add_argument("pool_id", help='pool uuid')
         sub_command.add_argument("secret", help='new 20 characters password')

         # get-capacity
-        sub_command = self.add_sub_command(subparser, 'get-capacity', '
-                                           'and utilized (percent) storage on the Pool.')
+        sub_command = self.add_sub_command(subparser, 'get-capacity', 'Get pool capacity')
         sub_command.add_argument("pool_id", help='pool uuid')

         # get-io-stats
         sub_command = self.add_sub_command(
-            subparser, 'get-io-stats', '
-            '(read-IO, write-IO, total-IO, read mbs, write mbs, total mbs).')
+            subparser, 'get-io-stats', 'Get pool IO statistics')
         sub_command.add_argument("id", help='Pool id')
         sub_command.add_argument("--history", help='(XXdYYh), list history records (one for every 15 minutes) '
                                                    'for XX days and YY hours (up to 10 days in total).')
@@ -605,7 +590,7 @@ class CLIWrapper:
         sub_command.add_argument("node_id", help='Caching node UUID')
         sub_command.add_argument("lvol_id", help='LVol UUID')

-        sub_command = self.add_sub_command(subparser, 'recreate', 'recreate Caching node bdevs
+        sub_command = self.add_sub_command(subparser, 'recreate', 'recreate Caching node bdevs')
         sub_command.add_argument("node_id", help='Caching node UUID')

     def init_parser(self):
@@ -659,13 +644,6 @@ class CLIWrapper:
             num_partitions_per_dev = args.partitions
             jm_percent = args.jm_percent

-            spdk_cpu_mask = None
-            if args.spdk_cpu_mask:
-                if self.validate_cpu_mask(args.spdk_cpu_mask):
-                    spdk_cpu_mask = args.spdk_cpu_mask
-                else:
-                    return f"Invalid cpu mask value: {args.spdk_cpu_mask}"
-
             spdk_mem = None
             if args.spdk_mem:
                 spdk_mem = self.parse_size(args.spdk_mem)
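The hunk above drops the CLI-side validation of --cpu-mask (the option itself was removed earlier in this file). For orientation only, a regex-based check like the one the removed validate_cpu_mask call presumably performed might look as follows; the exact original pattern is not shown in this diff, so this is an illustrative stand-in, not the package's code:

```python
import re

# Hypothetical stand-in for the removed validate_cpu_mask helper:
# accept a hex CPU mask such as "0x3F". The real implementation may
# have accepted other spellings; it is not visible in this diff.
def validate_cpu_mask(mask: str) -> bool:
    return re.fullmatch(r"0x[0-9a-fA-F]+", mask) is not None
```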
@@ -673,7 +651,7 @@ class CLIWrapper:
                     return f"SPDK memory:{args.spdk_mem} must be larger than 1G"

             out = storage_ops.add_node(
-                cluster_id, node_ip, ifname, data_nics,
+                cluster_id, node_ip, ifname, data_nics, spdk_mem, spdk_image, spdk_debug,
                 small_pool_count, large_pool_count, small_bufsize, large_bufsize, num_partitions_per_dev, jm_percent)
             return out

@@ -692,13 +670,6 @@ class CLIWrapper:
             spdk_image = args.spdk_image
             spdk_debug = args.spdk_debug

-            cpu_mask = None
-            if args.spdk_cpu_mask:
-                if self.validate_cpu_mask(args.spdk_cpu_mask):
-                    cpu_mask = args.spdk_cpu_mask
-                else:
-                    return f"Invalid cpu mask value: {args.spdk_cpu_mask}"
-
             spdk_mem = None
             if args.spdk_mem:
                 spdk_mem = self.parse_size(args.spdk_mem)
@@ -712,7 +683,7 @@ class CLIWrapper:
             large_bufsize = args.large_bufsize

             ret = storage_ops.restart_storage_node(
-                node_id,
+                node_id, spdk_mem,
                 spdk_image, spdk_debug,
                 small_pool_count, large_pool_count,
                 small_bufsize, large_bufsize)
simplyblock_core/constants.py

@@ -53,7 +53,7 @@ GRAYLOG_CHECK_INTERVAL_SEC = 60
 FDB_CHECK_INTERVAL_SEC = 60

 SIMPLY_BLOCK_DOCKER_IMAGE = "simplyblock/simplyblock:pre-release"
-SIMPLY_BLOCK_CLI_NAME = "sbcli
+SIMPLY_BLOCK_CLI_NAME = "sbcli"
 TASK_EXEC_INTERVAL_SEC = 30
 TASK_EXEC_RETRY_COUNT = 8

simplyblock_core/controllers/device_controller.py

@@ -110,7 +110,8 @@ def _def_create_device_stack(device_obj, snode):
     alceml_id = device_obj.get_id()
     alceml_name = get_alceml_name(alceml_id)
     logger.info(f"adding {alceml_name}")
-    ret = rpc_client.bdev_alceml_create(alceml_name, test_name, alceml_id, pba_init_mode=2
+    ret = rpc_client.bdev_alceml_create(alceml_name, test_name, alceml_id, pba_init_mode=2,
+                                        dev_cpu_mask=snode.dev_cpu_mask)
     if not ret:
         logger.error(f"Failed to create alceml bdev: {alceml_name}")
         return False
@@ -145,7 +146,8 @@ def _def_create_device_stack(device_obj, snode):
     ret = rpc_client.nvmf_subsystem_add_ns(subsystem_nqn, pt_name)

     if hasattr(device_obj, 'jm_bdev') and device_obj.jm_bdev:
-        ret = rpc_client.bdev_jm_create(device_obj.jm_bdev, device_obj.alceml_bdev
+        ret = rpc_client.bdev_jm_create(device_obj.jm_bdev, device_obj.alceml_bdev,
+                                        dev_cpu_mask=snode.dev_cpu_mask)
         if not ret:
             logger.error(f"Failed to create jm bdev: {device_obj.jm_bdev}")
             return False
simplyblock_core/controllers/lvol_controller.py

@@ -265,7 +265,7 @@ def add_lvol(name, size, host_id_or_name, pool_id_or_name, use_comp, use_crypto,

     # name, vuid, ndcs, npcs, num_blocks, block_size, alloc_names
     ret = rpc_client.bdev_distrib_create(f"distr_{name}", vuid, distr_ndcs, distr_npcs, num_blocks, distr_bs, jm_names,
-                                         distr_chunk_bs)
+                                         distr_chunk_bs, dev_cpu_mask=snode.dev_cpu_mask)
     bdev_stack.append({"type": "distr", "name": f"distr_{name}"})
     if not ret:
         logger.error("failed to create Distr bdev")
@@ -758,6 +758,7 @@ def _create_bdev_stack(lvol, snode, ha_comm_addrs, ha_inode_self):
         params['jm_names'] = get_jm_names(snode)
         params['ha_comm_addrs'] = ha_comm_addrs
         params['ha_inode_self'] = ha_inode_self
+        params['dev_cpu_mask'] = snode.dev_cpu_mask
         ret = rpc_client.bdev_distrib_create(**params)
         if ret:
             ret = distr_controller.send_cluster_map_to_node(snode)
simplyblock_core/controllers/snapshot_controller.py

@@ -49,7 +49,7 @@ def add(lvol_id, snapshot_name):
     ret = rpc_client.bdev_distrib_create(
         base_name, new_vuid, lvol.ndcs, lvol.npcs, num_blocks,
         lvol.distr_bs, lvol_controller.get_jm_names(snode), lvol.distr_chunk_bs,
-        None, None, lvol.distr_page_size)
+        None, None, lvol.distr_page_size, dev_cpu_mask=snode.dev_cpu_mask)
     if not ret:
         logger.error("Failed to create Distr bdev")
         return False, "Failed to create Distr bdev"
@@ -232,7 +232,8 @@ def clone(snapshot_id, clone_name, new_size=0):
     name = f"distr_{new_vuid}_1"
     ret = rpc_client.bdev_distrib_create(
         name, new_vuid, lvol.ndcs, lvol.npcs, num_blocks,
-        lvol.distr_bs, jm_names, lvol.distr_chunk_bs, None, None, lvol.distr_page_size
+        lvol.distr_bs, jm_names, lvol.distr_chunk_bs, None, None, lvol.distr_page_size,
+        dev_cpu_mask=snode.dev_cpu_mask)
     if not ret:
         msg="Failed to create Distr bdev"
         logger.error(msg)
simplyblock_core/controllers/tasks_controller.py (new file)

@@ -0,0 +1,70 @@
+# coding=utf-8
+import logging
+import time
+import uuid
+
+from simplyblock_core import kv_store
+from simplyblock_core.controllers import tasks_events
+from simplyblock_core.models.job_schedule import JobSchedule
+
+logger = logging.getLogger()
+db_controller = kv_store.DBController()
+
+
+def _validate_new_task_node_restart(cluster_id, node_id):
+    tasks = db_controller.get_job_tasks(cluster_id)
+    for task in tasks:
+        if task.function_name == JobSchedule.FN_NODE_RESTART and task.node_id == node_id:
+            if task.status != JobSchedule.STATUS_DONE:
+                logger.info(f"Task found, skip adding new task: {task.get_id()}")
+                return False
+    return True
+
+
+def _validate_new_task_dev_restart(cluster_id, node_id, device_id):
+    tasks = db_controller.get_job_tasks(cluster_id)
+    for task in tasks:
+        if task.function_name == JobSchedule.FN_DEV_RESTART and task.device_id == device_id:
+            if task.status != JobSchedule.STATUS_DONE:
+                logger.info(f"Task found, skip adding new task: {task.get_id()}")
+                return False
+        elif task.function_name == JobSchedule.FN_NODE_RESTART and task.node_id == node_id:
+            if task.status != JobSchedule.STATUS_DONE:
+                logger.info(f"Task found, skip adding new task: {task.get_id()}")
+                return False
+    return True
+
+
+def _add_task(function_name, cluster_id, node_id, device_id):
+
+    if function_name in [JobSchedule.FN_DEV_RESTART, JobSchedule.FN_DEV_MIG]:
+        if not _validate_new_task_dev_restart(cluster_id, node_id, device_id):
+            return False
+    elif function_name == JobSchedule.FN_NODE_RESTART:
+        if not _validate_new_task_node_restart(cluster_id, node_id):
+            return False
+
+    task_obj = JobSchedule()
+    task_obj.uuid = str(uuid.uuid4())
+    task_obj.cluster_id = cluster_id
+    task_obj.node_id = node_id
+    task_obj.device_id = device_id
+    task_obj.date = int(time.time())
+    task_obj.function_name = function_name
+    task_obj.status = JobSchedule.STATUS_NEW
+    task_obj.write_to_db(db_controller.kv_store)
+    tasks_events.task_create(task_obj)
+    return task_obj.uuid
+
+
+def add_device_mig_task(device_id):
+    device = db_controller.get_storage_devices(device_id)
+    return _add_task(JobSchedule.FN_DEV_MIG, device.cluster_id, device.node_id, device.get_id())
+
+
+def add_device_to_auto_restart(device):
+    return _add_task(JobSchedule.FN_DEV_RESTART, device.cluster_id, device.node_id, device.get_id())
+
+
+def add_node_to_auto_restart(node):
+    return _add_task(JobSchedule.FN_NODE_RESTART, node.cluster_id, node.get_id(), "")
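The new tasks_controller replaces the 1.0.9 version and centralizes de-duplication: _add_task refuses to queue a task while an unfinished restart or migration task already exists for the same device (or a node restart for its node). A minimal usage sketch, assuming `device` is a device model exposing cluster_id, node_id and get_id() as the calls above imply:

```python
# Sketch only: `device` is assumed to be a NVMe device model object,
# matching how add_device_to_auto_restart uses it in the new file above.
from simplyblock_core.controllers import tasks_controller

task_id = tasks_controller.add_device_to_auto_restart(device)
if task_id:
    print(f"queued device restart task: {task_id}")
else:
    # a pending FN_DEV_RESTART for this device (or FN_NODE_RESTART for
    # its node) already exists, so no duplicate task was written
    print("restart already pending, nothing queued")
```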
simplyblock_core/models/storage_node.py

@@ -70,6 +70,10 @@ class StorageNode(BaseModel):

         # spdk params
         "spdk_cpu_mask": {"type": str, "default": ""},
+        "app_thread_mask": {"type": str, "default": ""},
+        "pollers_mask": {"type": str, "default": ""},
+        "os_cores": {"type": str, "default": []},
+        "dev_cpu_mask": {"type": str, "default": ""},
         "spdk_mem": {"type": int, "default": 0},
         "spdk_image": {"type": str, "default": ""},
         "spdk_debug": {"type": bool, "default": False},
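StorageNode now carries four additional CPU-mask fields alongside spdk_cpu_mask, splitting core assignment between the SPDK app thread, pollers, and per-device bdevs. The masks are hex strings in which bit N selects core N. An illustrative helper (not part of the package) showing how such a mask expands to core IDs:

```python
# Hypothetical helper, not in sbcli: expand a hex CPU-mask string
# (e.g. an app_thread_mask or dev_cpu_mask value) into core indices.
def mask_to_cores(mask: str) -> list:
    value = int(mask, 16)  # same base-16 parsing rpc_client applies below
    return [bit for bit in range(value.bit_length()) if (value >> bit) & 1]

assert mask_to_cores("0x3C") == [2, 3, 4, 5]  # 0x3C = 0b111100 -> cores 2-5
```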
simplyblock_core/rpc_client.py

@@ -318,7 +318,8 @@ class RPCClient:
         params = {"name": name}
         return self._request2("ultra21_bdev_pass_delete", params)

-    def bdev_alceml_create(self, alceml_name, nvme_name, uuid, pba_init_mode=3
+    def bdev_alceml_create(self, alceml_name, nvme_name, uuid, pba_init_mode=3,
+                           dev_cpu_mask=""):
         params = {
             "name": alceml_name,
             "cntr_path": nvme_name,
@@ -334,10 +335,13 @@ class RPCClient:
             "use_optimized": True,
             "pba_nbalign": 4096
         }
+        if dev_cpu_mask:
+            params["bdb_lcpu_mask"] = int(dev_cpu_mask,16)
         return self._request("bdev_alceml_create", params)

     def bdev_distrib_create(self, name, vuid, ndcs, npcs, num_blocks, block_size, jm_names,
-                            chunk_size, ha_comm_addrs=None, ha_inode_self=None, pba_page_size=2097152
+                            chunk_size, ha_comm_addrs=None, ha_inode_self=None, pba_page_size=2097152,
+                            dev_cpu_mask=""):
         """"
         // Optional (not specified = no HA)
         // Comma-separated communication addresses, for each node, e.g. "192.168.10.1:45001,192.168.10.1:32768".
@@ -363,6 +367,8 @@ class RPCClient:
         if ha_comm_addrs:
             params['ha_comm_addrs'] = ha_comm_addrs
             params['ha_inode_self'] = ha_inode_self
+        if dev_cpu_mask:
+            params["bdb_lcpu_mask"] = int(dev_cpu_mask, 16)

         return self._request("bdev_distrib_create", params)

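All three bdev-create RPCs (alceml, distrib, and jm below) wire the mask through identically: only when dev_cpu_mask is non-empty is it parsed as hexadecimal and attached as bdb_lcpu_mask. Note that int(..., 16) accepts the string with or without a 0x prefix:

```python
# Base-16 parsing as used by the bdev_*_create methods in this diff;
# both spellings yield the same integer mask value.
for mask in ("0xF0", "F0"):
    print(int(mask, 16))  # 240 in both cases, i.e. cores 4-7
```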
@@ -583,12 +589,14 @@ class RPCClient:
         }
         return self._request("ultra21_lvol_dismount", params)

-    def bdev_jm_create(self, name, name_storage1, block_size=4096):
+    def bdev_jm_create(self, name, name_storage1, block_size=4096, dev_cpu_mask=""):
         params = {
             "name": name,
             "name_storage1": name_storage1,
             "block_size": block_size
         }
+        if dev_cpu_mask:
+            params["bdb_lcpu_mask"] = int(dev_cpu_mask, 16)
         return self._request("bdev_jm_create", params)

     def bdev_jm_delete(self, name):
@@ -641,3 +649,21 @@ class RPCClient:
     def bdev_jm_unmap_vuid(self, name, vuid):
         params = {"name": name, "vuid": vuid}
         return self._request("bdev_jm_unmap_vuid", params)
+
+    def sock_impl_set_options(self):
+        method = "sock_impl_set_options"
+        params = {"impl_name": "posix", "enable_quickack": True,
+                  "enable_zerocopy_send_server": True,
+                  "enable_zerocopy_send_client": True}
+        return self._request(method, params)
+
+    def nvmf_set_config(self, poll_groups_mask):
+        params = {"poll_groups_mask": poll_groups_mask}
+        return self._request("nvmf_set_config", params)
+
+    def thread_get_stats(self):
+        return self._request("thread_get_stats")
+
+    def thread_set_cpumask(self, app_thread_process_id, app_thread_mask):
+        params = {"id": app_thread_process_id, "cpumask": app_thread_mask}
+        return self._request("thread_set_cpumask", params)
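The four new helpers map one-to-one onto SPDK JSON-RPC methods and together suggest a pinning flow: tune socket options, look up the app thread via thread_get_stats, then re-pin it with thread_set_cpumask. A sketch under stated assumptions; the RPCClient constructor arguments and the exact thread_get_stats response shape (SPDK documents a "threads" list with "id" and "name" entries) are not shown in this diff:

```python
# Hedged sketch of how the new helpers could be combined; the constructor
# arguments, mask values, and response fields are assumptions here.
client = RPCClient("10.0.0.5", 8080, "user", "pass")  # hypothetical args
client.sock_impl_set_options()                        # quickack + zero-copy send
stats = client.thread_get_stats()
for thread in stats.get("threads", []):
    if thread.get("name") == "app_thread":
        # re-pin the SPDK app thread to the configured app_thread_mask
        client.thread_set_cpumask(thread["id"], "0x3")
client.nvmf_set_config("0xC")                         # pin NVMf poll groups
```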
simplyblock_core/services/tasks_runner_migration.py

@@ -48,7 +48,7 @@ while True:
         logger.error("No clusters found!")
     else:
         for cl in clusters:
-            tasks = db_controller.get_job_tasks(cl.get_id())
+            tasks = db_controller.get_job_tasks(cl.get_id(), reverse=False)
             for task in tasks:
                 delay_seconds = constants.TASK_EXEC_INTERVAL_SEC
                 if task.function_name == JobSchedule.FN_DEV_MIG: