sbcli-dev 3.8.64__zip → 3.8.66__zip

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (150)
  1. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/PKG-INFO +1 -1
  2. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/env_var +1 -1
  3. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/sbcli_dev.egg-info/PKG-INFO +1 -1
  4. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_cli/cli.py +14 -0
  5. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/controllers/device_controller.py +118 -92
  6. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/storage_node_monitor.py +28 -31
  7. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/storage_node_ops.py +36 -31
  8. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/utils.py +1 -1
  9. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/README.md +0 -0
  10. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/pyproject.toml +0 -0
  11. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/sbcli_dev.egg-info/SOURCES.txt +0 -0
  12. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/sbcli_dev.egg-info/dependency_links.txt +0 -0
  13. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/sbcli_dev.egg-info/entry_points.txt +0 -0
  14. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/sbcli_dev.egg-info/requires.txt +0 -0
  15. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/sbcli_dev.egg-info/top_level.txt +0 -0
  16. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/setup.cfg +0 -0
  17. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/setup.py +0 -0
  18. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_cli/main.py +0 -0
  19. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/__init__.py +0 -0
  20. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/cluster_ops.py +0 -0
  21. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/cnode_client.py +0 -0
  22. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/compute_node_ops.py +0 -0
  23. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/constants.py +0 -0
  24. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/controllers/__init__.py +0 -0
  25. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/controllers/caching_node_controller.py +0 -0
  26. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/controllers/cluster_events.py +0 -0
  27. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/controllers/device_events.py +0 -0
  28. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/controllers/events_controller.py +0 -0
  29. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/controllers/health_controller.py +0 -0
  30. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/controllers/lvol_controller.py +0 -0
  31. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/controllers/lvol_events.py +0 -0
  32. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/controllers/mgmt_events.py +0 -0
  33. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/controllers/pool_controller.py +0 -0
  34. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/controllers/pool_events.py +0 -0
  35. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/controllers/snapshot_controller.py +0 -0
  36. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/controllers/snapshot_events.py +0 -0
  37. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/controllers/storage_events.py +0 -0
  38. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/controllers/tasks_controller.py +0 -0
  39. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/controllers/tasks_events.py +0 -0
  40. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/distr_controller.py +0 -0
  41. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/kv_store.py +0 -0
  42. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/mgmt_node_ops.py +0 -0
  43. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/models/__init__.py +0 -0
  44. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/models/base_model.py +0 -0
  45. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/models/caching_node.py +0 -0
  46. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/models/cluster.py +0 -0
  47. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/models/compute_node.py +0 -0
  48. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/models/deployer.py +0 -0
  49. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/models/events.py +0 -0
  50. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/models/global_settings.py +0 -0
  51. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/models/iface.py +0 -0
  52. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/models/job_schedule.py +0 -0
  53. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/models/lvol_model.py +0 -0
  54. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/models/mgmt_node.py +0 -0
  55. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/models/nvme_device.py +0 -0
  56. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/models/pool.py +0 -0
  57. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/models/port_stat.py +0 -0
  58. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/models/snapshot.py +0 -0
  59. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/models/stats.py +0 -0
  60. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/models/storage_node.py +0 -0
  61. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/pci_utils.py +0 -0
  62. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/rpc_client.py +0 -0
  63. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/__init__.py +0 -0
  64. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/alerting/alert_resources.yaml.j2 +0 -0
  65. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/alerting/alert_rules.yaml +0 -0
  66. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/clean_local_storage_deploy.sh +0 -0
  67. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/config_docker.sh +0 -0
  68. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/dashboard.yml +0 -0
  69. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/dashboards/cluster.json +0 -0
  70. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/dashboards/devices.json +0 -0
  71. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/dashboards/lvols.json +0 -0
  72. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/dashboards/node-exporter.json +0 -0
  73. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/dashboards/nodes.json +0 -0
  74. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/dashboards/pools.json +0 -0
  75. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/datasource.yml +0 -0
  76. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/db_config_double.sh +0 -0
  77. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/db_config_single.sh +0 -0
  78. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/deploy_stack.sh +0 -0
  79. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/docker-compose-swarm-monitoring.yml +0 -0
  80. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/docker-compose-swarm.yml +0 -0
  81. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/haproxy.cfg +0 -0
  82. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/install_deps.sh +0 -0
  83. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/objstore.yml +0 -0
  84. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/prometheus.yml +0 -0
  85. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/run_ssh.sh +0 -0
  86. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/set_db_config.sh +0 -0
  87. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/scripts/stack_deploy_wait.sh +0 -0
  88. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/__init__.py +0 -0
  89. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/cached_lvol_stat_collector.py +0 -0
  90. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/caching_node_monitor.py +0 -0
  91. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/cap_monitor.py +0 -0
  92. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/capacity_and_stats_collector.py +0 -0
  93. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/device_monitor.py +0 -0
  94. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/distr_event_collector.py +0 -0
  95. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/health_check_service.py +0 -0
  96. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/install_service.sh +0 -0
  97. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/log_agg_service.py +0 -0
  98. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/lvol_monitor.py +0 -0
  99. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/lvol_stat_collector.py +0 -0
  100. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/main_distr_event_collector.py +0 -0
  101. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/mgmt_node_monitor.py +0 -0
  102. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/new_device_discovery.py +0 -0
  103. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/port_stat_collector.py +0 -0
  104. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/remove_service.sh +0 -0
  105. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/service_template.service +0 -0
  106. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/spdk_http_proxy_server.py +0 -0
  107. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/tasks_runner_failed_migration.py +0 -0
  108. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/tasks_runner_migration.py +0 -0
  109. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/tasks_runner_new_dev_migration.py +0 -0
  110. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/tasks_runner_node_add.py +0 -0
  111. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/services/tasks_runner_restart.py +0 -0
  112. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/shell_utils.py +0 -0
  113. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_core/snode_client.py +0 -0
  114. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/__init__.py +0 -0
  115. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/app.py +0 -0
  116. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/auth_middleware.py +0 -0
  117. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/blueprints/__init__.py +0 -0
  118. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/blueprints/caching_node_ops.py +0 -0
  119. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/blueprints/caching_node_ops_k8s.py +0 -0
  120. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/blueprints/node_api_basic.py +0 -0
  121. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/blueprints/node_api_caching_docker.py +0 -0
  122. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/blueprints/node_api_caching_ks.py +0 -0
  123. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/blueprints/snode_ops.py +0 -0
  124. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/blueprints/snode_ops_k8s.py +0 -0
  125. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/blueprints/web_api_caching_node.py +0 -0
  126. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/blueprints/web_api_cluster.py +0 -0
  127. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/blueprints/web_api_deployer.py +0 -0
  128. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/blueprints/web_api_device.py +0 -0
  129. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/blueprints/web_api_lvol.py +0 -0
  130. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/blueprints/web_api_mgmt_node.py +0 -0
  131. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/blueprints/web_api_pool.py +0 -0
  132. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/blueprints/web_api_snapshot.py +0 -0
  133. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/blueprints/web_api_storage_node.py +0 -0
  134. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/caching_node_app.py +0 -0
  135. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/caching_node_app_k8s.py +0 -0
  136. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/node_utils.py +0 -0
  137. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/node_webapp.py +0 -0
  138. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/snode_app.py +0 -0
  139. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/snode_app_k8s.py +0 -0
  140. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/static/delete.py +0 -0
  141. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/static/deploy.py +0 -0
  142. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/static/deploy_cnode.yaml +0 -0
  143. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/static/deploy_spdk.yaml +0 -0
  144. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/static/is_up.py +0 -0
  145. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/static/list_deps.py +0 -0
  146. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/static/rpac.yaml +0 -0
  147. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/static/tst.py +0 -0
  148. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/templates/caching_deploy_spdk.yaml.j2 +0 -0
  149. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/templates/storage_deploy_spdk.yaml.j2 +0 -0
  150. {sbcli_dev-3.8.64 → sbcli_dev-3.8.66}/simplyblock_web/utils.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: sbcli-dev
3
- Version: 3.8.64
3
+ Version: 3.8.66
4
4
  Summary: CLI for managing SimplyBlock cluster
5
5
  Home-page: https://www.simplyblock.io/
6
6
  Author: Hamdy
@@ -1,5 +1,5 @@
1
1
  SIMPLY_BLOCK_COMMAND_NAME=sbcli-dev
2
- SIMPLY_BLOCK_VERSION=3.8.64
2
+ SIMPLY_BLOCK_VERSION=3.8.66
3
3
 
4
4
 
5
5
  SIMPLY_BLOCK_DOCKER_IMAGE=simplyblock/simplyblock:main
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: sbcli-dev
3
- Version: 3.8.64
3
+ Version: 3.8.66
4
4
  Summary: CLI for managing SimplyBlock cluster
5
5
  Home-page: https://www.simplyblock.io/
6
6
  Author: Hamdy
@@ -230,6 +230,14 @@ class CLIWrapper:
230
230
  sub_command = self.add_sub_command(subparser, "info-spdk", 'Get SPDK memory information')
231
231
  sub_command.add_argument("id", help='Node UUID')
232
232
 
233
+ sub_command = self.add_sub_command(subparser, 'remove-jm-device', 'Remove JM device')
234
+ sub_command.add_argument("jm_device_id", help='JM device ID')
235
+ sub_command.add_argument("--force", help='Force device remove', required=False, action='store_true')
236
+
237
+ sub_command = self.add_sub_command(subparser, 'restart-jm-device', 'Restart JM device')
238
+ sub_command.add_argument("jm_device_id", help='JM device ID')
239
+ sub_command.add_argument("--force", help='Force device remove', required=False, action='store_true')
240
+
233
241
  #
234
242
  # ----------------- cluster -----------------
235
243
  #
@@ -879,6 +887,12 @@ class CLIWrapper:
879
887
  elif sub_command == "get":
880
888
  ret = storage_ops.get(args.id)
881
889
 
890
+ elif sub_command == "remove-jm-device":
891
+ ret = device_controller.remove_jm_device(args.jm_device_id, args.force)
892
+
893
+ elif sub_command == "restart-jm-device":
894
+ ret = device_controller.restart_jm_device(args.jm_device_id, args.force)
895
+
882
896
  else:
883
897
  self.parser.print_help()
884
898
 
@@ -258,43 +258,13 @@ def restart_device(device_id, force=False):
258
258
  if part.startswith(nvme_controller):
259
259
  jm_part = part
260
260
  break
261
- if snode.jm_device.status == NVMeDevice.STATUS_ONLINE:
262
- set_jm_device_state(snode.jm_device.get_id(), JMDevice.STATUS_UNAVAILABLE)
263
- # delete jm stack
264
- if snode.enable_ha_jm:
265
- ret = rpc_client.subsystem_delete(snode.jm_device.nvmf_nqn)
266
-
267
- if snode.jm_device.pt_bdev:
268
- ret = rpc_client.bdev_PT_NoExcl_delete(snode.jm_device.pt_bdev)
269
-
270
- ret = rpc_client.bdev_jm_delete(snode.jm_device.jm_bdev)
271
- ret = rpc_client.bdev_alceml_delete(snode.jm_device.alceml_bdev)
272
- if snode.jm_device.testing_bdev:
273
- ret = rpc_client.bdev_passtest_delete(snode.jm_device.testing_bdev)
274
- if len(snode.jm_device.jm_nvme_bdev_list) == 2:
275
- ret = rpc_client.bdev_raid_delete(snode.jm_device.raid_bdev)
276
-
277
- # create jm
278
- jm_nvme_bdevs = snode.jm_device.jm_nvme_bdev_list
279
- if jm_part not in jm_nvme_bdevs:
280
- jm_nvme_bdevs.append(jm_part)
281
- new_jm = storage_node_ops._create_jm_stack_on_raid(rpc_client, jm_nvme_bdevs, snode, after_restart=True)
282
- if new_jm:
283
- snode = db_controller.get_storage_node_by_id(snode.get_id())
284
- snode.jm_device = new_jm
285
- snode.write_to_db(db_controller.kv_store)
286
- set_jm_device_state(snode.jm_device.get_id(), JMDevice.STATUS_ONLINE)
287
-
288
- # make other nodes connect to new jm
289
- if snode.enable_ha_jm:
290
- logger.info("Make other nodes connect to the new devices")
291
- snodes = db_controller.get_storage_nodes_by_cluster_id(snode.cluster_id)
292
- for node_index, node in enumerate(snodes):
293
- if node.get_id() == snode.get_id() or node.status != StorageNode.STATUS_ONLINE:
294
- continue
295
- node.remote_jm_devices = storage_node_ops._connect_to_remote_jm_devs(node)
296
- node.write_to_db(db_controller.kv_store)
297
- logger.info(f"connected to devices count: {len(node.remote_devices)}")
261
+
262
+ if not jm_part:
263
+ if snode.jm_device.status == NVMeDevice.STATUS_ONLINE:
264
+ remove_jm_device(snode.jm_device.get_id(), force=True)
265
+ time.sleep(3)
266
+
267
+ restart_jm_device(snode.jm_device.get_id(), force=True)
298
268
 
299
269
  return "Done"
300
270
 
@@ -373,8 +343,8 @@ def device_remove(device_id, force=True):
373
343
  if force is False:
374
344
  return False
375
345
 
376
- logger.info("Sending device event")
377
- distr_controller.send_dev_status_event(device, NVMeDevice.STATUS_REMOVED)
346
+ logger.info("Setting device unavailable")
347
+ device_set_unavailable(device_id)
378
348
 
379
349
  logger.info("Disconnecting device from all nodes")
380
350
  distr_controller.disconnect_device(device)
@@ -408,12 +378,7 @@ def device_remove(device_id, force=True):
408
378
  if not force:
409
379
  return False
410
380
 
411
- device.status = 'removed'
412
- snode.write_to_db(db_controller.kv_store)
413
- device_events.device_delete(device)
414
-
415
- for lvol in db_controller.get_lvols():
416
- lvol_controller.send_cluster_map(lvol.get_id())
381
+ device_set_state(device_id, NVMeDevice.STATUS_REMOVED)
417
382
 
418
383
  # remove device from jm raid
419
384
  if snode.jm_device.raid_bdev:
@@ -425,53 +390,11 @@ def device_remove(device_id, force=True):
425
390
  break
426
391
 
427
392
  if dev_to_remove:
428
- if len(snode.jm_device.jm_nvme_bdev_list) <= 2:
429
-
430
- set_jm_device_state(snode.jm_device.get_id(), JMDevice.STATUS_UNAVAILABLE)
431
-
432
- # delete jm stack
433
- if snode.enable_ha_jm:
434
- ret = rpc_client.subsystem_delete(snode.jm_device.nvmf_nqn)
435
-
436
- if snode.jm_device.pt_bdev:
437
- ret = rpc_client.bdev_PT_NoExcl_delete(snode.jm_device.pt_bdev)
438
-
439
- ret = rpc_client.bdev_jm_delete(snode.jm_device.jm_bdev)
440
- ret = rpc_client.bdev_alceml_delete(snode.jm_device.alceml_bdev)
441
- if snode.jm_device.testing_bdev:
442
- ret = rpc_client.bdev_passtest_delete(snode.jm_device.testing_bdev)
443
- if len(snode.jm_device.jm_nvme_bdev_list) == 2:
444
- ret = rpc_client.bdev_raid_delete(snode.jm_device.raid_bdev)
445
-
446
-
447
- # create jm
448
- jm_nvme_bdevs = snode.jm_device.jm_nvme_bdev_list
449
- jm_nvme_bdevs.remove(dev_to_remove)
450
- if len(jm_nvme_bdevs) > 0:
451
- new_jm = storage_node_ops._create_jm_stack_on_raid(rpc_client, jm_nvme_bdevs, snode, after_restart=True)
452
- if new_jm:
453
- snode = db_controller.get_storage_node_by_id(snode.get_id())
454
- snode.jm_device = new_jm
455
- snode.write_to_db(db_controller.kv_store)
456
- set_jm_device_state(snode.jm_device.get_id(), JMDevice.STATUS_ONLINE)
457
-
458
- # make other nodes connect to new jm
459
- if snode.enable_ha_jm:
460
- logger.info("Make other nodes connect to the new devices")
461
- snodes = db_controller.get_storage_nodes_by_cluster_id(snode.cluster_id)
462
- for node_index, node in enumerate(snodes):
463
- if node.get_id() == snode.get_id() or node.status != StorageNode.STATUS_ONLINE:
464
- continue
465
- node.remote_jm_devices = storage_node_ops._connect_to_remote_jm_devs(node)
466
- node.write_to_db(db_controller.kv_store)
467
- logger.info(f"connected to devices count: {len(node.remote_devices)}")
468
- time.sleep(3)
469
-
470
- elif len(snode.jm_device.jm_nvme_bdev_list) > 2:
471
- ret = rpc_client.bdev_raid_remove_base_bdev(snode.jm_device.raid_bdev, dev_to_remove)
472
- if ret:
473
- snode.jm_device.jm_nvme_bdev_list.remove(dev_to_remove)
474
- snode.write_to_db(db_controller.kv_store)
393
+ if snode.jm_device.status == NVMeDevice.STATUS_ONLINE:
394
+ remove_jm_device(snode.jm_device.get_id(), force=True)
395
+ time.sleep(3)
396
+
397
+ restart_jm_device(snode.jm_device.get_id(), force=True)
475
398
 
476
399
  return True
477
400
 
@@ -800,4 +723,107 @@ def set_jm_device_state(device_id, state):
800
723
 
801
724
  jm_device.status = state
802
725
  snode.write_to_db(db_controller.kv_store)
726
+
727
+ if snode.enable_ha_jm and state in [NVMeDevice.STATUS_ONLINE, NVMeDevice.STATUS_UNAVAILABLE]:
728
+ # make other nodes connect to the new devices
729
+ logger.info("Make other nodes connect to the new devices")
730
+ snodes = db_controller.get_storage_nodes_by_cluster_id(snode.cluster_id)
731
+ for node_index, node in enumerate(snodes):
732
+ if node.get_id() == snode.get_id() or node.status != StorageNode.STATUS_ONLINE:
733
+ continue
734
+ logger.info(f"Connecting to node: {node.get_id()}")
735
+ node.remote_jm_devices = storage_node_ops._connect_to_remote_jm_devs(node)
736
+ node.write_to_db(db_controller.kv_store)
737
+ logger.info(f"connected to devices count: {len(node.remote_jm_devices)}")
738
+
739
+ return True
740
+
741
+
742
+ def remove_jm_device(device_id, force=False):
743
+ db_controller = DBController()
744
+ jm_device = None
745
+ snode = None
746
+ for node in db_controller.get_storage_nodes():
747
+ if node.jm_device.get_id() == device_id:
748
+ jm_device = node.jm_device
749
+ snode = node
750
+ break
751
+ if not jm_device:
752
+ logger.error("device not found")
753
+ return False
754
+
755
+ if jm_device.status != JMDevice.STATUS_ONLINE:
756
+ logger.warning("device is not online")
757
+ if not force:
758
+ return False
759
+
760
+ set_jm_device_state(snode.jm_device.get_id(), JMDevice.STATUS_UNAVAILABLE)
761
+
762
+ rpc_client = RPCClient(snode.mgmt_ip, snode.rpc_port, snode.rpc_username, snode.rpc_password)
763
+
764
+ # delete jm stack
765
+ if snode.enable_ha_jm:
766
+ ret = rpc_client.subsystem_delete(snode.jm_device.nvmf_nqn)
767
+ if not ret:
768
+ logger.error("device not found")
769
+
770
+ if snode.jm_device.pt_bdev:
771
+ ret = rpc_client.bdev_PT_NoExcl_delete(snode.jm_device.pt_bdev)
772
+
773
+ ret = rpc_client.bdev_jm_delete(snode.jm_device.jm_bdev)
774
+
775
+ ret = rpc_client.bdev_alceml_delete(snode.jm_device.alceml_bdev)
776
+
777
+ if snode.jm_device.testing_bdev:
778
+ ret = rpc_client.bdev_passtest_delete(snode.jm_device.testing_bdev)
779
+
780
+ if len(snode.jm_device.jm_nvme_bdev_list) == 2:
781
+ ret = rpc_client.bdev_raid_delete(snode.jm_device.raid_bdev)
782
+
783
+ return True
784
+
785
+
786
+ def restart_jm_device(device_id, force=False):
787
+ db_controller = DBController()
788
+ jm_device = None
789
+ snode = None
790
+ for node in db_controller.get_storage_nodes():
791
+ if node.jm_device.get_id() == device_id:
792
+ jm_device = node.jm_device
793
+ snode = node
794
+ break
795
+ if not jm_device:
796
+ logger.error("device not found")
797
+ return False
798
+
799
+ if jm_device.status == JMDevice.STATUS_ONLINE:
800
+ logger.warning("device is online")
801
+ if not force:
802
+ return False
803
+
804
+ # add to jm raid
805
+ if snode.jm_device and snode.jm_device.raid_bdev:
806
+ rpc_client = RPCClient(snode.mgmt_ip, snode.rpc_port, snode.rpc_username, snode.rpc_password)
807
+ bdevs_names = [d['name'] for d in rpc_client.get_bdevs()]
808
+ jm_nvme_bdevs = []
809
+ for dev in snode.nvme_devices:
810
+ if dev.status != NVMeDevice.STATUS_ONLINE:
811
+ continue
812
+ dev_part = f"{dev.nvme_bdev[:-1]}1"
813
+ if dev_part in bdevs_names:
814
+ if dev_part not in jm_nvme_bdevs:
815
+ jm_nvme_bdevs.append(dev_part)
816
+
817
+ if len(jm_nvme_bdevs) > 0:
818
+ new_jm = storage_node_ops._create_jm_stack_on_raid(rpc_client, jm_nvme_bdevs, snode, after_restart=True)
819
+ if not new_jm:
820
+ logger.error("failed to create jm stack")
821
+ return False
822
+
823
+ else:
824
+ snode = db_controller.get_storage_node_by_id(snode.get_id())
825
+ snode.jm_device = new_jm
826
+ snode.write_to_db(db_controller.kv_store)
827
+ set_jm_device_state(snode.jm_device.get_id(), JMDevice.STATUS_ONLINE)
828
+
803
829
  return True
@@ -136,36 +136,33 @@ while True:
136
136
  logger.info(f"Check 2: ping mgmt ip {snode.mgmt_ip} ... {ping_check}")
137
137
 
138
138
  # 2- check node API
139
- node_api_check = False
140
- spdk_process = False
141
- node_rpc_check = False
142
- if ping_check:
143
- node_api_check = health_controller._check_node_api(snode.mgmt_ip)
144
- logger.info(f"Check: node API {snode.mgmt_ip}:5000 ... {node_api_check}")
145
-
146
- # 3- check spdk_process
147
- spdk_process = health_controller._check_spdk_process_up(snode.mgmt_ip)
148
- logger.info(f"Check: spdk process {snode.mgmt_ip}:5000 ... {spdk_process}")
149
-
150
- # node_rpc_check = True
151
- # 3- check node RPC
152
- node_rpc_check = health_controller._check_node_rpc(
153
- snode.mgmt_ip, snode.rpc_port, snode.rpc_username, snode.rpc_password, timeout=5, retry=1)
154
- logger.info(f"Check: node RPC {snode.mgmt_ip}:{snode.rpc_port} ... {node_rpc_check}")
155
-
156
- # check JM device
157
- if snode.jm_device:
158
- if snode.jm_device.status in [JMDevice.STATUS_ONLINE, JMDevice.STATUS_UNAVAILABLE]:
159
- ret = health_controller.check_jm_device(snode.jm_device.get_id())
160
- if ret:
161
- logger.info(f"JM bdev is online: {snode.jm_device.get_id()}")
162
- if snode.jm_device.status != JMDevice.STATUS_ONLINE:
163
- device_controller.set_jm_device_state(snode.jm_device.get_id(), JMDevice.STATUS_ONLINE)
164
- else:
165
- logger.error(f"JM bdev is offline: {snode.jm_device.get_id()}")
166
- if snode.jm_device.status != JMDevice.STATUS_UNAVAILABLE:
167
- device_controller.set_jm_device_state(snode.jm_device.get_id(),
168
- JMDevice.STATUS_UNAVAILABLE)
139
+
140
+ node_api_check = health_controller._check_node_api(snode.mgmt_ip)
141
+ logger.info(f"Check: node API {snode.mgmt_ip}:5000 ... {node_api_check}")
142
+
143
+ # 3- check spdk_process
144
+ spdk_process = health_controller._check_spdk_process_up(snode.mgmt_ip)
145
+ logger.info(f"Check: spdk process {snode.mgmt_ip}:5000 ... {spdk_process}")
146
+
147
+ # node_rpc_check = True
148
+ # 3- check node RPC
149
+ node_rpc_check = health_controller._check_node_rpc(
150
+ snode.mgmt_ip, snode.rpc_port, snode.rpc_username, snode.rpc_password, timeout=5, retry=1)
151
+ logger.info(f"Check: node RPC {snode.mgmt_ip}:{snode.rpc_port} ... {node_rpc_check}")
152
+
153
+ # check JM device
154
+ if snode.jm_device:
155
+ if snode.jm_device.status in [JMDevice.STATUS_ONLINE, JMDevice.STATUS_UNAVAILABLE]:
156
+ ret = health_controller.check_jm_device(snode.jm_device.get_id())
157
+ if ret:
158
+ logger.info(f"JM bdev is online: {snode.jm_device.get_id()}")
159
+ if snode.jm_device.status != JMDevice.STATUS_ONLINE:
160
+ device_controller.set_jm_device_state(snode.jm_device.get_id(), JMDevice.STATUS_ONLINE)
161
+ else:
162
+ logger.error(f"JM bdev is offline: {snode.jm_device.get_id()}")
163
+ if snode.jm_device.status != JMDevice.STATUS_UNAVAILABLE:
164
+ device_controller.set_jm_device_state(snode.jm_device.get_id(),
165
+ JMDevice.STATUS_UNAVAILABLE)
169
166
 
170
167
  is_node_online = ping_check and node_api_check and node_rpc_check and spdk_process
171
168
  if is_node_online:
@@ -178,7 +175,7 @@ while True:
178
175
 
179
176
  elif ping_check and node_api_check and not spdk_process:
180
177
  # add node to auto restart
181
- if cluster.status != Cluster.STATUS_UNREADY:
178
+ if cluster.status == Cluster.STATUS_ACTIVE:
182
179
  tasks_controller.add_node_to_auto_restart(snode)
183
180
 
184
181
  update_cluster_status(cluster_id)
@@ -792,8 +792,8 @@ def _connect_to_remote_jm_devs(this_node):
792
792
  if node.get_id() == this_node.get_id() or node.status != StorageNode.STATUS_ONLINE:
793
793
  continue
794
794
 
795
- logger.info(f"Connecting to node {node.get_id()}")
796
- if node.jm_device:
795
+ if node.jm_device and node.jm_device.status == NVMeDevice.STATUS_ONLINE:
796
+ logger.info(f"Connecting to JM on node: {node.get_id()}")
797
797
  name = f"remote_{node.jm_device.jm_bdev}"
798
798
  bdev_name = f"{name}n1"
799
799
  ret = rpc_client.get_bdevs(bdev_name)
@@ -2608,6 +2608,14 @@ def create_lvstore(snode, ndcs, npcs, distr_bs, distr_chunk_bs, page_size_in_blo
2608
2608
  },
2609
2609
  "distribs_list": distrib_list
2610
2610
  },
2611
+ # {
2612
+ # "type": "bdev_ptnonexcl",
2613
+ # "name": "raid_PT",
2614
+ # "params": {
2615
+ # "name": "raid_PT",
2616
+ # "base_bdev_name": raid_device
2617
+ # }
2618
+ # },
2611
2619
  {
2612
2620
  "type": "bdev_lvstore",
2613
2621
  "name": lvs_name,
@@ -2649,7 +2657,6 @@ def _create_bdev_stack(snode, lvstore_stack=None):
2649
2657
  else:
2650
2658
  stack = lvstore_stack
2651
2659
 
2652
- raid_device = None
2653
2660
  for bdev in stack:
2654
2661
  type = bdev['type']
2655
2662
  name = bdev['name']
@@ -2670,11 +2677,34 @@ def _create_bdev_stack(snode, lvstore_stack=None):
2670
2677
  elif type == "bdev_lvstore" and lvstore_stack:
2671
2678
  ret = rpc_client.create_lvstore(**params)
2672
2679
 
2680
+ elif type == "bdev_ptnonexcl":
2681
+ ret = rpc_client.bdev_PT_NoExcl_create(**params)
2682
+ if ret:
2683
+ try:
2684
+ # add pass through
2685
+ pt_name = "raid_PT"
2686
+ subsystem_nqn = snode.subsystem + ":dev:raid"
2687
+ logger.info("creating raid subsystem %s", subsystem_nqn)
2688
+ ret = rpc_client.subsystem_create(subsystem_nqn, 'sbcli-cn', 'sbcli-cn')
2689
+ for iface in snode.data_nics:
2690
+ if iface.ip4_address:
2691
+ tr_type = iface.get_transport_type()
2692
+ logger.info("adding listener for %s on IP %s" % (subsystem_nqn, iface.ip4_address))
2693
+ ret = rpc_client.listeners_create(subsystem_nqn, tr_type, iface.ip4_address, "4420")
2694
+ break
2695
+ logger.info(f"add {pt_name} to subsystem")
2696
+ ret = rpc_client.nvmf_subsystem_add_ns(subsystem_nqn, pt_name)
2697
+ if not ret:
2698
+ logger.error(f"Failed to add: {pt_name} to the subsystem: {subsystem_nqn}")
2699
+ return False
2700
+ except:
2701
+ pass
2702
+
2673
2703
  elif type == "bdev_raid":
2674
2704
  distribs_list = bdev["distribs_list"]
2675
2705
  strip_size_kb = params["strip_size_kb"]
2676
2706
  ret = rpc_client.bdev_raid_create(name, distribs_list, strip_size_kb=strip_size_kb)
2677
- raid_device = name
2707
+
2678
2708
  else:
2679
2709
  logger.debug(f"Unknown BDev type: {type}")
2680
2710
  continue
@@ -2688,33 +2718,6 @@ def _create_bdev_stack(snode, lvstore_stack=None):
2688
2718
  _remove_bdev_stack(created_bdevs[::-1], rpc_client)
2689
2719
  return False, f"Failed to create BDev: {name}"
2690
2720
 
2691
- if raid_device:
2692
- try:
2693
- # add pass through
2694
- rpc_client = RPCClient(snode.mgmt_ip, snode.rpc_port, snode.rpc_username, snode.rpc_password)
2695
- pt_name = "raid_PT"
2696
- ret = rpc_client.bdev_PT_NoExcl_create(pt_name, raid_device)
2697
- if not ret:
2698
- logger.error(f"Failed to create pt noexcl bdev: {pt_name}")
2699
- return False
2700
-
2701
- subsystem_nqn = snode.subsystem + ":dev:raid"
2702
- logger.info("creating raid subsystem %s", subsystem_nqn)
2703
- ret = rpc_client.subsystem_create(subsystem_nqn, 'sbcli-cn', 'sbcli-cn')
2704
- for iface in snode.data_nics:
2705
- if iface.ip4_address:
2706
- tr_type = iface.get_transport_type()
2707
- logger.info("adding listener for %s on IP %s" % (subsystem_nqn, iface.ip4_address))
2708
- ret = rpc_client.listeners_create(subsystem_nqn, tr_type, iface.ip4_address, "4420")
2709
- break
2710
- logger.info(f"add {pt_name} to subsystem")
2711
- ret = rpc_client.nvmf_subsystem_add_ns(subsystem_nqn, pt_name)
2712
- if not ret:
2713
- logger.error(f"Failed to add: {pt_name} to the subsystem: {subsystem_nqn}")
2714
- return False
2715
- except:
2716
- pass
2717
-
2718
2721
  return True, None
2719
2722
 
2720
2723
 
@@ -2730,6 +2733,8 @@ def _remove_bdev_stack(bdev_stack, rpc_client, remove_distr_only=False):
2730
2733
  ret = rpc_client.bdev_raid_delete(name)
2731
2734
  elif type == "bdev_lvstore" and not remove_distr_only:
2732
2735
  ret = rpc_client.bdev_lvol_delete_lvstore(name)
2736
+ elif type == "bdev_ptnonexcl":
2737
+ ret = rpc_client.bdev_PT_NoExcl_delete(name)
2733
2738
  else:
2734
2739
  logger.debug(f"Unknown BDev type: {type}")
2735
2740
  continue
@@ -261,7 +261,7 @@ def process_records(records, records_count):
261
261
 
262
262
  def ping_host(ip):
263
263
  logger.debug(f"Pinging ip ... {ip}")
264
- response = os.system(f"ping -c 3 -W 3 {ip} > /dev/null")
264
+ response = os.system(f"ping -c 1 -W 3 {ip} > /dev/null")
265
265
  if response == 0:
266
266
  logger.debug(f"{ip} is UP")
267
267
  return True
File without changes
File without changes
File without changes
File without changes