sbcli-pre 1.2.5__zip → 1.2.6__zip
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/PKG-INFO +1 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/env_var +1 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/sbcli_pre.egg-info/PKG-INFO +1 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/sbcli_pre.egg-info/SOURCES.txt +5 -3
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_cli/cli.py +113 -115
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/cluster_ops.py +138 -235
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/constants.py +5 -7
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/controllers/caching_node_controller.py +8 -6
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/controllers/cluster_events.py +9 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/controllers/device_controller.py +56 -63
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/controllers/events_controller.py +5 -3
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/controllers/health_controller.py +30 -40
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/controllers/lvol_controller.py +51 -38
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/controllers/pool_controller.py +8 -4
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/controllers/snapshot_controller.py +9 -3
- sbcli_pre-1.2.6/simplyblock_core/controllers/tasks_controller.py +103 -0
- sbcli_pre-1.2.6/simplyblock_core/controllers/tasks_events.py +37 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/distr_controller.py +13 -9
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/kv_store.py +47 -20
- sbcli_pre-1.2.6/simplyblock_core/mgmt_node_ops.py +205 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/models/events.py +9 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/models/job_schedule.py +6 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/models/nvme_device.py +42 -4
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/models/storage_node.py +9 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/rpc_client.py +55 -10
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/__init__.py +0 -4
- sbcli_pre-1.2.5/simplyblock_core/scripts/alerting/alert_resources.yaml → sbcli_pre-1.2.6/simplyblock_core/scripts/alerting/alert_resources.yaml.j2 +54 -5
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/dashboards/cluster.json +1 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/deploy_stack.sh +9 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/docker-compose-swarm-monitoring.yml +32 -15
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/docker-compose-swarm.yml +17 -2
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/haproxy.cfg +15 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/install_deps.sh +3 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/stack_deploy_wait.sh +1 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/services/capacity_and_stats_collector.py +1 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/services/device_monitor.py +5 -46
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/services/distr_event_collector.py +10 -11
- sbcli_pre-1.2.6/simplyblock_core/services/health_check_service.py +134 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/services/lvol_monitor.py +1 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/services/lvol_stat_collector.py +1 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/services/port_stat_collector.py +0 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/services/storage_node_monitor.py +49 -44
- sbcli_pre-1.2.6/simplyblock_core/services/tasks_runner_migration.py +61 -0
- sbcli_pre-1.2.5/simplyblock_core/services/job_tasks.py → sbcli_pre-1.2.6/simplyblock_core/services/tasks_runner_restart.py +95 -46
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/snode_client.py +12 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/storage_node_ops.py +525 -336
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/utils.py +46 -1
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/blueprints/snode_ops.py +103 -25
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/blueprints/web_api_cluster.py +20 -43
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/blueprints/web_api_device.py +10 -7
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/blueprints/web_api_lvol.py +9 -5
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/blueprints/web_api_pool.py +14 -5
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/blueprints/web_api_storage_node.py +3 -10
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/node_utils.py +0 -2
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/utils.py +8 -0
- sbcli_pre-1.2.5/simplyblock_core/mgmt_node_ops.py +0 -80
- sbcli_pre-1.2.5/simplyblock_core/scripts/apply_dashboard.sh +0 -22
- sbcli_pre-1.2.5/simplyblock_core/services/health_check_service.py +0 -136
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/README.md +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/pyproject.toml +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/sbcli_pre.egg-info/dependency_links.txt +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/sbcli_pre.egg-info/entry_points.txt +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/sbcli_pre.egg-info/requires.txt +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/sbcli_pre.egg-info/top_level.txt +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/setup.cfg +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/setup.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_cli/main.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/__init__.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/cnode_client.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/compute_node_ops.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/controllers/__init__.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/controllers/device_events.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/controllers/lvol_events.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/controllers/mgmt_events.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/controllers/pool_events.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/controllers/snapshot_events.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/controllers/storage_events.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/models/__init__.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/models/base_model.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/models/caching_node.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/models/cluster.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/models/compute_node.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/models/deployer.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/models/global_settings.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/models/iface.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/models/lvol_model.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/models/mgmt_node.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/models/pool.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/models/port_stat.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/models/snapshot.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/models/stats.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/pci_utils.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/alerting/alert_rules.yaml +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/clean_local_storage_deploy.sh +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/config_docker.sh +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/dashboards/devices.json +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/dashboards/lvols.json +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/dashboards/node-exporter.json +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/dashboards/nodes.json +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/dashboards/pools.json +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/datasource.yml +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/db_config_double.sh +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/db_config_single.sh +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/prometheus.yml +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/run_ssh.sh +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/scripts/set_db_config.sh +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/services/__init__.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/services/caching_node_monitor.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/services/cap_monitor.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/services/install_service.sh +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/services/log_agg_service.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/services/mgmt_node_monitor.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/services/remove_service.sh +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/services/service_template.service +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_core/shell_utils.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/__init__.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/app.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/auth_middleware.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/blueprints/__init__.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/blueprints/caching_node_ops.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/blueprints/caching_node_ops_k8s.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/blueprints/node_api_basic.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/blueprints/node_api_caching_docker.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/blueprints/node_api_caching_ks.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/blueprints/web_api_caching_node.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/blueprints/web_api_deployer.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/blueprints/web_api_mgmt_node.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/blueprints/web_api_snapshot.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/caching_node_app.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/caching_node_app_k8s.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/node_webapp.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/snode_app.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/static/delete.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/static/deploy.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/static/deploy_cnode.yaml +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/static/deploy_spdk.yaml +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/static/is_up.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/static/list_deps.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/static/rpac.yaml +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/static/tst.py +0 -0
- {sbcli_pre-1.2.5 → sbcli_pre-1.2.6}/simplyblock_web/templates/deploy_spdk.yaml.j2 +0 -0
simplyblock_core/utils.py
@@ -257,7 +257,7 @@ def process_records(records, records_count):
 
 def ping_host(ip):
     logger.debug(f"Pinging ip ... {ip}")
-    response = os.system(f"ping -c 1 {ip} > /dev/null")
+    response = os.system(f"ping -c 1 -W 3 {ip} > /dev/null")
     if response == 0:
         logger.debug(f"{ip} is UP")
         return True
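The only change here is the added `-W 3`, which caps how long iputils-ping waits for a reply. A minimal sketch of the effect, not part of the package (the target address is an illustrative, assumed-unreachable TEST-NET address):

    import os
    import time

    def ping_host(ip):
        # Same one-probe check as the 1.2.6 code above: wait at most 3 seconds for a reply.
        return os.system(f"ping -c 1 -W 3 {ip} > /dev/null") == 0

    start = time.time()
    print(ping_host("192.0.2.1"))                  # assumed unreachable -> False
    print(f"waited ~{time.time() - start:.0f}s")   # bounded at roughly 3 seconds

Without the timeout flag, a probe to a silent host can block noticeably longer before os.system returns.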
simplyblock_core/utils.py (continued)
@@ -280,3 +280,48 @@ def sum_records(records):
 
 def get_random_vuid():
     return 1 + int(random.random() * 10000)
+
+
+def calculate_core_allocation(cpu_count):
+    '''
+    If number of cpu cores >= 8, tune cpu core mask
+    1. Never use core 0 for spdk.
+    2. For every 8 cores, leave one core to the operating system
+    3. Do not use more than 15% of remaining available cores for nvme pollers
+    4. Use one dedicated core for app_thread
+    5. distribute distrib bdevs and alceml bdevs to all other cores
+    JIRA ticket link/s
+    https://simplyblock.atlassian.net/browse/SFAM-885
+    '''
+
+    all_cores = list(range(0, cpu_count))
+    # Calculate the number of cores to exclude for the OS
+    if cpu_count == 8:
+        os_cores_count = 1
+    else:
+        os_cores_count = 1 + (cpu_count // 8)
+
+    # Calculate os cores
+    os_cores = all_cores[0:os_cores_count]
+
+    # Calculate available cores
+    available_cores_count = cpu_count - os_cores_count
+
+    # Calculate NVMe pollers
+    nvme_pollers_count = int(available_cores_count * 0.15)
+    nvme_pollers_cores = all_cores[os_cores_count:os_cores_count + nvme_pollers_count]
+
+    # Allocate core for app_thread
+    app_thread_core = all_cores[os_cores_count + nvme_pollers_count:os_cores_count + nvme_pollers_count + 1]
+
+    # Calculate bdb_lcpu cores
+    bdb_lcpu_cores = all_cores[os_cores_count + nvme_pollers_count + 1:]
+
+    return os_cores, nvme_pollers_cores, app_thread_core, bdb_lcpu_cores
+
+
+def generate_mask(cores):
+    mask = 0
+    for core in cores:
+        mask |= (1 << core)
+    return f'0x{mask:X}'
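To make the allocation policy above concrete, a small worked example for a hypothetical 16-core node; the numbers follow directly from calculate_core_allocation and generate_mask as shown in the diff, and the snippet itself is not part of the package (it assumes the functions land in simplyblock_core.utils, as the file list above suggests):

    from simplyblock_core.utils import calculate_core_allocation, generate_mask

    cpu_count = 16
    os_cores, pollers, app_thread, bdb_lcpu = calculate_core_allocation(cpu_count)

    # os_cores_count = 1 + 16 // 8 = 3      -> cores [0, 1, 2] stay with the OS
    # available = 13, int(13 * 0.15) = 1    -> core [3] for NVMe pollers
    # next core [4] for app_thread, cores [5..15] for distrib/alceml bdevs
    print(os_cores)                   # [0, 1, 2]
    print(generate_mask(pollers))     # 0x8
    print(generate_mask(app_thread))  # 0x10
    print(generate_mask(bdb_lcpu))    # 0xFFE0

generate_mask simply sets one bit per core, so each returned group can be expressed as a hex cpu-mask string of the kind SPDK expects.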
simplyblock_web/blueprints/snode_ops.py
@@ -14,7 +14,8 @@ from flask import request
 
 from simplyblock_web import utils, node_utils
 
-from simplyblock_core import scripts, constants
+from simplyblock_core import scripts, constants, shell_utils
+from ec2_metadata import ec2_metadata
 
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.DEBUG)
@@ -22,11 +23,24 @@ bp = Blueprint("snode", __name__, url_prefix="/snode")
 
 cluster_id_file = "/etc/foundationdb/sbcli_cluster_id"
 
-
-
-
+CPU_INFO = cpuinfo.get_cpu_info()
+HOSTNAME, _, _ = node_utils.run_command("hostname -s")
+SYSTEM_ID = ""
+EC2_PUBLIC_IP = ""
+EC2_MD = ""
+
 try:
-
+    SYSTEM_ID = ec2_metadata.instance_id
+except:
+    SYSTEM_ID, _, _ = node_utils.run_command("dmidecode -s system-uuid")
+
+try:
+    EC2_PUBLIC_IP = ec2_metadata.public_ipv4
+except:
+    pass
+
+try:
+    EC2_MD = ec2_metadata.instance_identity_document
 except:
     pass
 
@@ -67,23 +81,10 @@ def spdk_process_start():
     spdk_cpu_mask = None
     if 'spdk_cpu_mask' in data:
         spdk_cpu_mask = data['spdk_cpu_mask']
+
     spdk_mem = None
     if 'spdk_mem' in data:
         spdk_mem = data['spdk_mem']
-    node_cpu_count = os.cpu_count()
-
-    if spdk_cpu_mask:
-        try:
-            requested_cpu_count = len(format(int(spdk_cpu_mask, 16), 'b'))
-            if requested_cpu_count > node_cpu_count:
-                return utils.get_response(
-                    False,
-                    f"The requested cpu count: {requested_cpu_count} "
-                    f"is larger than the node's cpu count: {node_cpu_count}")
-        except:
-            spdk_cpu_mask = hex(int(math.pow(2, node_cpu_count)) - 1)
-    else:
-        spdk_cpu_mask = hex(int(math.pow(2, node_cpu_count)) - 1)
 
     if spdk_mem:
         spdk_mem = int(utils.parse_size(spdk_mem) / (1000 * 1000))
@@ -229,11 +230,11 @@ def get_info():
     out = {
         "cluster_id": get_cluster_id(),
 
-        "hostname":
-        "system_id":
+        "hostname": HOSTNAME,
+        "system_id": SYSTEM_ID,
 
-        "cpu_count":
-        "cpu_hz":
+        "cpu_count": CPU_INFO['count'],
+        "cpu_hz": CPU_INFO['hz_advertised'][0],
 
         "memory": node_utils.get_memory(),
         "hugepages": node_utils.get_huge_memory(),
@@ -247,9 +248,9 @@ def get_info():
 
         "network_interface": node_utils.get_nics_data(),
 
-        "ec2_metadata":
+        "ec2_metadata": EC2_MD,
 
-        "ec2_public_ip":
+        "ec2_public_ip": EC2_PUBLIC_IP,
 
     }
     return utils.get_response(out)
 
@@ -303,3 +304,80 @@ def leave_swarm():
     except:
         pass
     return utils.get_response(True)
+
+
+@bp.route('/make_gpt_partitions', methods=['POST'])
+def make_gpt_partitions_for_nbd():
+    nbd_device = '/dev/nbd0'
+    jm_percent = '3'
+    num_partitions = 1
+
+    try:
+        data = request.get_json()
+        nbd_device = data['nbd_device']
+        jm_percent = data['jm_percent']
+        num_partitions = data['num_partitions']
+    except:
+        pass
+
+    cmd_list = [
+        f"parted -fs {nbd_device} mklabel gpt",
+        f"parted -f {nbd_device} mkpart journal \"0%\" \"{jm_percent}%\""
+    ]
+    sg_cmd_list = [
+        f"sgdisk -t 1:6527994e-2c5a-4eec-9613-8f5944074e8b {nbd_device}",
+    ]
+    perc_per_partition = int((100 - jm_percent) / num_partitions)
+    for i in range(num_partitions):
+        st = jm_percent + (i * perc_per_partition)
+        en = st + perc_per_partition
+        cmd_list.append(f"parted -f {nbd_device} mkpart part{(i+1)} \"{st}%\" \"{en}%\"")
+        sg_cmd_list.append(f"sgdisk -t {(i+2)}:6527994e-2c5a-4eec-9613-8f5944074e8b {nbd_device}")
+
+    for cmd in cmd_list+sg_cmd_list:
+        logger.debug(cmd)
+        out, err, ret_code = shell_utils.run_command(cmd)
+        logger.debug(out)
+        logger.debug(ret_code)
+        if ret_code != 0:
+            logger.error(err)
+            return utils.get_response(False, f"Error running cmd: {cmd}, returncode: {ret_code}, output: {out}, err: {err}")
+        time.sleep(1)
+
+    return utils.get_response(True)
+
+
+@bp.route('/delete_dev_gpt_partitions', methods=['POST'])
+def delete_gpt_partitions_for_dev():
+
+    data = request.get_json()
+
+    if "device_pci" not in data:
+        return utils.get_response(False, "Required parameter is missing: device_pci")
+
+    device_pci = data['device_pci']
+
+    cmd_list = [
+        f"echo -n \"{device_pci}\" > /sys/bus/pci/drivers/uio_pci_generic/unbind",
+        f"echo -n \"{device_pci}\" > /sys/bus/pci/drivers/nvme/bind",
+    ]
+
+    for cmd in cmd_list:
+        logger.debug(cmd)
+        ret = os.popen(cmd).read().strip()
+        logger.debug(ret)
+        time.sleep(1)
+
+    device_name = os.popen(f"ls /sys/devices/pci0000:00/{device_pci}/nvme/nvme*/ | grep nvme").read().strip()
+    cmd_list = [
+        f"parted -fs /dev/{device_name} mklabel gpt",
+        f"echo -n \"{device_pci}\" > /sys/bus/pci/drivers/nvme/unbind",
+    ]
+
+    for cmd in cmd_list:
+        logger.debug(cmd)
+        ret = os.popen(cmd).read().strip()
+        logger.debug(ret)
+        time.sleep(1)
+
+    return utils.get_response(True)
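For reference, a hedged sketch of how the new partitioning endpoint might be called; the node address, port, and any auth middleware are deployment-specific assumptions, while the route and JSON fields come from the handler above:

    import requests

    node_ip = "10.0.0.5"  # hypothetical storage node address; port is assumed, not from this diff
    resp = requests.post(
        f"http://{node_ip}:5000/snode/make_gpt_partitions",
        json={
            "nbd_device": "/dev/nbd0",   # defaults used by the handler when fields are omitted
            "jm_percent": 3,
            "num_partitions": 1,
        },
    )
    print(resp.json())

The handler shells out to parted/sgdisk for each partition, so the call is synchronous and returns an error payload if any command exits non-zero.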
simplyblock_web/blueprints/web_api_cluster.py
@@ -22,50 +22,27 @@ db_controller = kv_store.DBController()
 
 @bp.route('/cluster', methods=['POST'])
 def add_cluster():
+
+    blk_size = 512
+    page_size_in_blocks = 2097152
+    cap_warn = 0
+    cap_crit = 0
+    prov_cap_warn = 0
+    prov_cap_crit = 0
+
     cl_data = request.get_json()
-    if 'blk_size'
-
-
-
-
-
-    if '
-
-
-
-
-
-    if 'dhchap' not in cl_data:
-        return utils.get_response_error("missing required param: dhchap", 400)
-    if 'NQN' not in cl_data:
-        return utils.get_response_error("missing required param: NQN", 400)
-
-    c = Cluster()
-    c.uuid = str(uuid.uuid4())
-
-    if cl_data['blk_size'] not in [512, 4096]:
-        return utils.get_response_error("blk_size can be 512 or 4096", 400)
-
-    if cl_data['ha_type'] not in ["single", "ha"]:
-        return utils.get_response_error("ha_type can be single or ha", 400)
-
-    if cl_data['dhchap'] not in ["off", "one-way", "bi-direct"]:
-        return utils.get_response_error("dhchap can be off, one-way or bi-direct", 400)
-
-    c.blk_size = cl_data['blk_size']
-    c.page_size_in_blocks = cl_data['page_size_in_blocks']
-    c.model_ids = cl_data['model_ids']
-    c.ha_type = cl_data['ha_type']
-    c.tls = cl_data['tls']
-    c.auth_hosts_only = cl_data['auth-hosts-only']
-    c.nqn = cl_data['nqn']
-    c.iscsi = cl_data['iscsi'] or False
-    c.dhchap = cl_data['dhchap']
-    c.cluster_status = Cluster.STATUS_ACTIVE
-    c.updated_at = int(time.time())
-    c.write_to_db(db_controller.kv_store)
-
-    return utils.get_response(c.to_dict()), 201
+    if 'blk_size' in cl_data:
+        if cl_data['blk_size'] not in [512, 4096]:
+            return utils.get_response_error("blk_size can be 512 or 4096", 400)
+        else:
+            blk_size = cl_data['blk_size']
+
+    if 'page_size_in_blocks' in cl_data:
+        page_size_in_blocks = cl_data['page_size_in_blocks']
+
+    ret = cluster_ops.add_cluster(blk_size, page_size_in_blocks, cap_warn, cap_crit, prov_cap_warn, prov_cap_crit)
+
+    return utils.get_response(ret)
 
 
 @bp.route('/cluster', methods=['GET'], defaults={'uuid': None})
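The handler now fills in defaults (blk_size 512, page_size_in_blocks 2097152) and delegates to cluster_ops.add_cluster, so a request only needs to name the values it wants to override. A minimal request sketch, with a placeholder management address and without whatever auth headers the deployment requires:

    import requests

    mgmt_addr = "http://10.0.0.2"  # hypothetical management API address
    resp = requests.post(
        f"{mgmt_addr}/cluster",
        json={"blk_size": 4096},   # optional; page_size_in_blocks may be overridden the same way
    )
    print(resp.status_code, resp.json())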
simplyblock_web/blueprints/web_api_device.py
@@ -3,7 +3,7 @@
 
 import logging
 
-from flask import Blueprint
+from flask import Blueprint, request
 
 from simplyblock_core.controllers import device_controller
 from simplyblock_web import utils
@@ -33,12 +33,15 @@ def list_devices_by_node(uuid):
 def list_storage_devices(uuid):
     devices = []
     if uuid:
-        dev = db_controller.
+        dev = db_controller.get_storage_device_by_id(uuid)
         if not dev:
             return utils.get_response_error(f"device not found: {uuid}", 404)
         devices = [dev]
     else:
-
+        cluster_id = utils.get_cluster_id(request)
+        nodes = db_controller.get_storage_nodes_by_cluster_id(cluster_id)
+        for node in nodes:
+            devices.append(node.nvme_devices)
     data = []
     for dev in devices:
         data.append(dev.get_clean_dict())
@@ -47,7 +50,7 @@ def list_storage_devices(uuid):
 @bp.route('/device/capacity/<string:uuid>/history/<string:history>', methods=['GET'])
 @bp.route('/device/capacity/<string:uuid>', methods=['GET'], defaults={'history': None})
 def device_capacity(uuid, history):
-    device = db_controller.
+    device = db_controller.get_storage_device_by_id(uuid)
     if not device:
         return utils.get_response_error(f"devices not found: {uuid}", 404)
 
@@ -58,7 +61,7 @@ def device_capacity(uuid, history):
 @bp.route('/device/iostats/<string:uuid>/history/<string:history>', methods=['GET'])
 @bp.route('/device/iostats/<string:uuid>', methods=['GET'], defaults={'history': None})
 def device_iostats(uuid, history):
-    devices = db_controller.
+    devices = db_controller.get_storage_device_by_id(uuid)
     if not devices:
         return utils.get_response_error(f"devices not found: {uuid}", 404)
 
@@ -71,7 +74,7 @@ def device_iostats(uuid, history):
 
 @bp.route('/device/reset/<string:uuid>', methods=['GET'])
 def device_reset(uuid):
-    devices = db_controller.
+    devices = db_controller.get_storage_device_by_id(uuid)
    if not devices:
         return utils.get_response_error(f"devices not found: {uuid}", 404)
 
@@ -81,7 +84,7 @@ def device_reset(uuid):
 
 @bp.route('/device/remove/<string:uuid>', methods=['GET'])
 def device_remove(uuid):
-    devices = db_controller.
+    devices = db_controller.get_storage_device_by_id(uuid)
     if not devices:
         return utils.get_response_error(f"devices not found: {uuid}", 404)
simplyblock_web/blueprints/web_api_lvol.py
@@ -19,17 +19,21 @@ bp = Blueprint("lvol", __name__)
 db_controller = kv_store.DBController()
 
 
-@bp.route('/lvol', defaults={'uuid': None}, methods=['GET'])
-@bp.route('/lvol/<string:uuid>', methods=['GET'])
-
+@bp.route('/lvol', defaults={'uuid': None, "cluster_id": None}, methods=['GET'])
+@bp.route('/lvol/<string:uuid>', defaults={"cluster_id": None}, methods=['GET'])
+@bp.route('/lvol/cluster_id/<string:cluster_id>', defaults={'uuid': None,}, methods=['GET'])
+def list_lvols(uuid, cluster_id):
     if uuid:
         lvol = db_controller.get_lvol_by_id(uuid)
         if lvol:
             lvols = [lvol]
         else:
             return utils.get_response_error(f"LVol not found: {uuid}", 404)
+    elif cluster_id:
+        lvols = db_controller.get_lvols(cluster_id)
     else:
-
+        cluster_id = utils.get_cluster_id(request)
+        lvols = db_controller.get_lvols(cluster_id)
     data = []
     for lvol in lvols:
         data.append(lvol.get_clean_dict())
@@ -123,7 +127,7 @@ def add_lvol():
     if not pool:
         return utils.get_response(None, f"Pool not found: {pool_id_or_name}", 400)
 
-    for lvol in db_controller.get_lvols():
+    for lvol in db_controller.get_lvols():  # pass
         if lvol.pool_uuid == pool.get_id():
             if lvol.lvol_name == name:
                 return utils.get_response(lvol.get_id())
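LVols can now be listed per cluster, either through the new /lvol/cluster_id/<cluster_id> route or, on the plain /lvol route, via the cluster id that utils.get_cluster_id extracts from the Authorization header (see the simplyblock_web/utils.py hunk further down). A small sketch with placeholder values; the base address and header format are assumptions:

    import requests

    base = "http://10.0.0.2"        # hypothetical API address
    cluster_id = "<cluster-uuid>"   # placeholder
    headers = {"Authorization": f"{cluster_id} <cluster-secret>"}  # shape assumed from get_cluster_id

    r1 = requests.get(f"{base}/lvol/cluster_id/{cluster_id}")      # explicit cluster scope
    r2 = requests.get(f"{base}/lvol", headers=headers)             # cluster id resolved from the header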
simplyblock_web/blueprints/web_api_pool.py
@@ -17,17 +17,21 @@ bp = Blueprint("pool", __name__)
 db_controller = kv_store.DBController()
 
 
-@bp.route('/pool', defaults={'uuid': None}, methods=['GET'])
-@bp.route('/pool/<string:uuid>', methods=['GET'])
-
+@bp.route('/pool', defaults={'uuid': None, "cluster_id": None}, methods=['GET'])
+@bp.route('/pool/<string:uuid>', defaults={"cluster_id": None}, methods=['GET'])
+@bp.route('/pool/cluster_id/<string:cluster_id>', defaults={'uuid': None,}, methods=['GET'])
+def list_pools(uuid, cluster_id):
     if uuid:
         pool = db_controller.get_pool_by_id(uuid)
         if pool:
             pools = [pool]
         else:
             return utils.get_response_error(f"Pool not found: {uuid}", 404)
+    elif cluster_id:
+        pools = db_controller.get_pools(cluster_id)
     else:
-
+        cluster_id = utils.get_cluster_id(request)
+        pools = db_controller.get_pools(cluster_id)
     data = []
     for pool in pools:
         data.append(pool.get_clean_dict())
@@ -39,6 +43,7 @@ def add_pool():
     """
     Params:
     | name (required) | LVol name or id
+    | cluster_id (required) | Cluster uuid
    | pool_max | Pool maximum size: 10M, 10G, 10(bytes)
     | lvol_max | LVol maximum size: 10M, 10G, 10(bytes)
     | no_secret | pool is created with a secret
@@ -51,7 +56,11 @@ def add_pool():
     if 'name' not in pool_data:
         return utils.get_response_error("missing required param: name", 400)
 
+    if 'cluster_id' not in pool_data:
+        return utils.get_response_error("missing required param: cluster_id", 400)
+
     name = pool_data['name']
+    cluster_id = pool_data['cluster_id']
     for p in db_controller.get_pools():
         if p.pool_name == name:
             return utils.get_response_error(f"Pool found with the same name: {name}", 400)
@@ -75,7 +84,7 @@ def add_pool():
 
     ret = pool_controller.add_pool(
         name, pool_max_size, lvol_max_size, max_rw_iops, max_rw_mbytes,
-        max_r_mbytes_per_sec, max_w_mbytes_per_sec, pool_secret)
+        max_r_mbytes_per_sec, max_w_mbytes_per_sec, pool_secret, cluster_id)
 
     return utils.get_response(ret)
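Pool creation now requires a cluster_id in the request body, which is forwarded to pool_controller.add_pool. A hedged request sketch (address and ids are placeholders, not from the package):

    import requests

    resp = requests.post(
        "http://10.0.0.2/pool",              # hypothetical API address
        json={
            "name": "pool1",
            "cluster_id": "<cluster-uuid>",  # now required; the API returns 400 if it is missing
            "pool_max": "10G",               # optional, per the Params docstring above
        },
    )
    print(resp.status_code, resp.json())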
simplyblock_web/blueprints/web_api_storage_node.py
@@ -30,7 +30,8 @@ def list_storage_nodes(uuid):
         else:
             return utils.get_response_error(f"node not found: {uuid}", 404)
     else:
-
+        cluster_id = utils.get_cluster_id(request)
+        nodes = db_controller.get_storage_nodes_by_cluster_id(cluster_id)
     data = []
     for node in nodes:
         d = node.get_clean_dict()
@@ -179,14 +180,6 @@ def storage_node_add():
     data_nics = req_data['data_nics']
     data_nics = data_nics.split(",")
 
-    spdk_cpu_mask = None
-    if 'spdk_cpu_mask' in req_data:
-        msk = req_data['spdk_cpu_mask']
-        if utils.validate_cpu_mask(msk):
-            spdk_cpu_mask = msk
-        else:
-            return utils.get_response_error(f"Invalid cpu mask value: {msk}", 400)
-
     spdk_mem = None
     if 'spdk_mem' in req_data:
         mem = req_data['spdk_mem']
@@ -195,7 +188,7 @@ def storage_node_add():
         return utils.get_response_error(f"SPDK memory:{mem} must be larger than 1G", 400)
 
     out = storage_node_ops.add_node(
-        cluster_id, node_ip, ifname, data_nics,
+        cluster_id, node_ip, ifname, data_nics, spdk_mem,
         spdk_image=spdk_image, spdk_debug=spdk_debug)
 
     return utils.get_response(out)
simplyblock_web/utils.py
@@ -85,3 +85,11 @@ def get_int_value_or_default(data, key, default):
         return int(get_value_or_default(data, key, default))
     except Exception:
         return default
+
+
+def get_cluster_id(request):
+    au = request.headers["Authorization"]
+    if len(au.split()) == 2:
+        cluster_id = au.split()[0]
+        cluster_secret = au.split()[1]
+        return cluster_id
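The new helper expects the Authorization header to carry two space-separated tokens, cluster id then cluster secret, and returns only the id (implicitly None when the header does not split into exactly two parts). A minimal sketch of the expected header shape, with placeholder values and a hypothetical API address:

    import requests

    cluster_id = "<cluster-uuid>"        # placeholder
    cluster_secret = "<cluster-secret>"  # placeholder
    headers = {"Authorization": f"{cluster_id} {cluster_secret}"}

    # Cluster-scoped listings above (devices, lvols, pools, storage nodes) resolve
    # their cluster id from this header via utils.get_cluster_id(request).
    requests.get("http://10.0.0.2/pool", headers=headers)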
sbcli_pre-1.2.5/simplyblock_core/mgmt_node_ops.py (removed; 1.2.6 ships a new mgmt_node_ops.py, see the file list)
@@ -1,80 +0,0 @@
-# coding=utf-8
-import json
-import logging
-import uuid
-
-import docker
-
-from simplyblock_core import utils
-from simplyblock_core.controllers import mgmt_events
-from simplyblock_core.kv_store import DBController
-from simplyblock_core.models.mgmt_node import MgmtNode
-
-logger = logging.getLogger()
-
-
-def add_mgmt_node(mgmt_ip, cluster_id=None):
-    db_controller = DBController()
-    hostname = utils.get_hostname()
-    node = db_controller.get_mgmt_node_by_hostname(hostname)
-    if node:
-        logger.error("Node already exists in the cluster")
-        return False
-
-    node = MgmtNode()
-    node.uuid = str(uuid.uuid4())
-    node.hostname = hostname
-    node.docker_ip_port = f"{mgmt_ip}:2375"
-    node.cluster_id = cluster_id
-    node.mgmt_ip = mgmt_ip
-    node.status = MgmtNode.STATUS_ONLINE
-    node.write_to_db(db_controller.kv_store)
-
-    mgmt_events.mgmt_add(node)
-    logger.info("Done")
-    return True
-
-
-def list_mgmt_nodes(is_json):
-    db_controller = DBController()
-    nodes = db_controller.get_mgmt_nodes()
-    data = []
-    output = ""
-
-    for node in nodes:
-        logging.debug(node)
-        logging.debug("*" * 20)
-        data.append({
-            "UUID": node.get_id(),
-            "Hostname": node.hostname,
-            "IP": node.mgmt_ip,
-            "Status": node.status,
-        })
-
-    if not data:
-        return output
-
-    if is_json:
-        output = json.dumps(data, indent=2)
-    else:
-        output = utils.print_table(data)
-    return output
-
-
-def remove_mgmt_node(uuid):
-    db_controller = DBController()
-    snode = db_controller.get_mgmt_node_by_id(uuid)
-    if not snode:
-        logger.error("can not find node")
-        return False
-
-    logging.info("Removing mgmt node")
-    snode.remove(db_controller.kv_store)
-
-    logger.info("Leaving swarm...")
-    node_docker = docker.DockerClient(base_url=f"tcp://{snode.docker_ip_port}", version="auto")
-    node_docker.swarm.leave()
-
-    mgmt_events.mgmt_remove(snode)
-    logging.info("done")
-
sbcli_pre-1.2.5/simplyblock_core/scripts/apply_dashboard.sh (removed)
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-TD=$(dirname -- "$(readlink -f -- "$0")")
-
-# Grafana Password
-export grafanaPassword=$1
-
-# Grafana username
-GF_ADMIN_USER=admin
-
-HOST=0.0.0.0:3000
-
-DASHBOARDS="${TD}/dashboards"
-for dashboard in "${DASHBOARDS}/cluster.json" "${DASHBOARDS}/devices.json" "${DASHBOARDS}/nodes.json" "${DASHBOARDS}/lvols.json" "${DASHBOARDS}/pools.json" "${DASHBOARDS}/node-exporter.json"; do
-    echo -e "\nUploading dashboard: ${dashboard}"
-    curl -X POST -H "Content-Type: application/json" \
-        -d "@${dashboard}" \
-        "http://${GF_ADMIN_USER}:${grafanaPassword}@${HOST}/api/dashboards/import"
-    echo ""
-done
-
-echo "Cluster deployment complete."