sbcli-pre 25.6.1__tar.gz → 25.6.2__tar.gz

This diff shows the content of publicly available package versions that have been released to one of the supported registries. The information is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Files changed (174)
  1. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/PKG-INFO +1 -1
  2. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/sbcli_pre.egg-info/PKG-INFO +1 -1
  3. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/controllers/health_controller.py +81 -27
  4. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/env_var +1 -1
  5. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/models/nvme_device.py +1 -0
  6. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/models/storage_node.py +1 -4
  7. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/rpc_client.py +14 -6
  8. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/health_check_service.py +1 -1
  9. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/storage_node_monitor.py +4 -2
  10. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/storage_node_ops.py +66 -28
  11. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/blueprints/web_api_metrics.py +26 -26
  12. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/README.md +0 -0
  13. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/pyproject.toml +0 -0
  14. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/requirements.txt +0 -0
  15. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/sbcli_pre.egg-info/SOURCES.txt +0 -0
  16. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/sbcli_pre.egg-info/dependency_links.txt +0 -0
  17. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/sbcli_pre.egg-info/entry_points.txt +0 -0
  18. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/sbcli_pre.egg-info/requires.txt +0 -0
  19. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/sbcli_pre.egg-info/top_level.txt +0 -0
  20. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/setup.cfg +0 -0
  21. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/setup.py +0 -0
  22. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_cli/__init__.py +0 -0
  23. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_cli/cli.py +0 -0
  24. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_cli/clibase.py +0 -0
  25. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_cli/main.py +0 -0
  26. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/__init__.py +0 -0
  27. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/cluster_ops.py +0 -0
  28. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/cnode_client.py +0 -0
  29. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/constants.py +0 -0
  30. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/controllers/__init__.py +0 -0
  31. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/controllers/caching_node_controller.py +0 -0
  32. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/controllers/cluster_events.py +0 -0
  33. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/controllers/device_controller.py +0 -0
  34. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/controllers/device_events.py +0 -0
  35. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/controllers/events_controller.py +0 -0
  36. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/controllers/lvol_controller.py +0 -0
  37. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/controllers/lvol_events.py +0 -0
  38. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/controllers/mgmt_events.py +0 -0
  39. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/controllers/pool_controller.py +0 -0
  40. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/controllers/pool_events.py +0 -0
  41. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/controllers/snapshot_controller.py +0 -0
  42. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/controllers/snapshot_events.py +0 -0
  43. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/controllers/storage_events.py +0 -0
  44. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/controllers/tasks_controller.py +0 -0
  45. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/controllers/tasks_events.py +0 -0
  46. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/controllers/tcp_ports_events.py +0 -0
  47. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/db_controller.py +0 -0
  48. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/distr_controller.py +0 -0
  49. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/mgmt_node_ops.py +0 -0
  50. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/models/__init__.py +0 -0
  51. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/models/base_model.py +0 -0
  52. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/models/caching_node.py +0 -0
  53. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/models/cluster.py +0 -0
  54. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/models/deployer.py +0 -0
  55. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/models/events.py +0 -0
  56. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/models/hublvol.py +0 -0
  57. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/models/iface.py +0 -0
  58. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/models/job_schedule.py +0 -0
  59. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/models/lvol_model.py +0 -0
  60. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/models/mgmt_node.py +0 -0
  61. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/models/pool.py +0 -0
  62. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/models/port_stat.py +0 -0
  63. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/models/snapshot.py +0 -0
  64. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/models/stats.py +0 -0
  65. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/pci_utils.py +0 -0
  66. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/__init__.py +0 -0
  67. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/alerting/alert_resources.yaml.j2 +0 -0
  68. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/alerting/alert_rules.yaml +0 -0
  69. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/clean_local_storage_deploy.sh +0 -0
  70. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/config_docker.sh +0 -0
  71. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/dashboard.yml +0 -0
  72. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/dashboards/cluster.json +0 -0
  73. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/dashboards/devices.json +0 -0
  74. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/dashboards/lvols.json +0 -0
  75. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/dashboards/node-exporter.json +0 -0
  76. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/dashboards/nodes.json +0 -0
  77. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/dashboards/pools.json +0 -0
  78. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/datasource.yml +0 -0
  79. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/db_config_double.sh +0 -0
  80. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/db_config_single.sh +0 -0
  81. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/deploy_fdb.sh +0 -0
  82. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/deploy_stack.sh +0 -0
  83. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/docker-compose-swarm-monitoring.yml +0 -0
  84. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/docker-compose-swarm.yml +0 -0
  85. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/foundation.yml +0 -0
  86. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/haproxy.cfg +0 -0
  87. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/helpers/__init__.py +0 -0
  88. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/helpers/deploy_cluster.sh +0 -0
  89. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/helpers/destroy_cluster.sh +0 -0
  90. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/helpers/nvme_disconnect_by_ip.py +0 -0
  91. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/install_deps.sh +0 -0
  92. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/objstore.yml +0 -0
  93. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/prepare_fdb.sh +0 -0
  94. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/prometheus.yml.j2 +0 -0
  95. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/run_ssh.sh +0 -0
  96. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/set_db_config.sh +0 -0
  97. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/scripts/stack_deploy_wait.sh +0 -0
  98. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/__init__.py +0 -0
  99. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/cached_lvol_stat_collector.py +0 -0
  100. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/caching_node_monitor.py +0 -0
  101. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/cap_monitor.py +0 -0
  102. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/capacity_and_stats_collector.py +0 -0
  103. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/device_monitor.py +0 -0
  104. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/install_service.sh +0 -0
  105. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/lvol_monitor.py +0 -0
  106. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/lvol_stat_collector.py +0 -0
  107. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/main_distr_event_collector.py +0 -0
  108. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/mgmt_node_monitor.py +0 -0
  109. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/new_device_discovery.py +0 -0
  110. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/remove_service.sh +0 -0
  111. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/service_template.service +0 -0
  112. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/spdk/__init__.py +0 -0
  113. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/spdk/client.py +0 -0
  114. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/spdk_http_proxy_server.py +0 -0
  115. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/tasks_cluster_status.py +0 -0
  116. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/tasks_runner_failed_migration.py +0 -0
  117. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/tasks_runner_migration.py +0 -0
  118. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/tasks_runner_new_dev_migration.py +0 -0
  119. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/tasks_runner_node_add.py +0 -0
  120. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/tasks_runner_port_allow.py +0 -0
  121. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/services/tasks_runner_restart.py +0 -0
  122. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/shell_utils.py +0 -0
  123. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/snode_client.py +0 -0
  124. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/test/test_models.py +0 -0
  125. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/test/test_utils.py +0 -0
  126. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/utils.py +0 -0
  127. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_core/workers/cleanup_foundationdb.py +0 -0
  128. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/README.md +0 -0
  129. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/__init__.py +0 -0
  130. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/app.py +0 -0
  131. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/auth_middleware.py +0 -0
  132. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/blueprints/__init__.py +0 -0
  133. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/blueprints/caching_node_ops.py +0 -0
  134. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/blueprints/caching_node_ops_k8s.py +0 -0
  135. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/blueprints/node_api_basic.py +0 -0
  136. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/blueprints/node_api_caching_docker.py +0 -0
  137. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/blueprints/node_api_caching_ks.py +0 -0
  138. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/blueprints/snode_ops.py +0 -0
  139. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/blueprints/snode_ops_k8s.py +0 -0
  140. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/blueprints/swagger_ui_blueprint.py +0 -0
  141. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/blueprints/web_api_caching_node.py +0 -0
  142. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/blueprints/web_api_cluster.py +0 -0
  143. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/blueprints/web_api_deployer.py +0 -0
  144. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/blueprints/web_api_device.py +0 -0
  145. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/blueprints/web_api_lvol.py +0 -0
  146. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/blueprints/web_api_mgmt_node.py +0 -0
  147. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/blueprints/web_api_pool.py +0 -0
  148. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/blueprints/web_api_snapshot.py +0 -0
  149. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/blueprints/web_api_storage_node.py +0 -0
  150. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/node_configure.py +0 -0
  151. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/node_utils.py +0 -0
  152. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/node_utils_k8s.py +0 -0
  153. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/node_webapp.py +0 -0
  154. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/static/SimplyBlock-API.postman_collection.json +0 -0
  155. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/static/delete.py +0 -0
  156. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/static/deploy.py +0 -0
  157. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/static/deploy_cnode.yaml +0 -0
  158. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/static/deploy_spdk.yaml +0 -0
  159. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/static/is_up.py +0 -0
  160. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/static/list_deps.py +0 -0
  161. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/static/rpac.yaml +0 -0
  162. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/static/swagger.yaml +0 -0
  163. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/static/tst.py +0 -0
  164. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/templates/caching_deploy_spdk.yaml.j2 +0 -0
  165. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/templates/storage_deploy_spdk.yaml.j2 +0 -0
  166. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/test/api/test_lvol.py +0 -0
  167. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/test/api/test_pool.py +0 -0
  168. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/test/api/test_snapshot.py +0 -0
  169. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/test/api/test_storage_node.py +0 -0
  170. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/test/conftest.py +0 -0
  171. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/test/pytest.ini +0 -0
  172. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/test/requirements.txt +0 -0
  173. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/test/util.py +0 -0
  174. {sbcli-pre-25.6.1 → sbcli-pre-25.6.2}/simplyblock_web/utils.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sbcli-pre
-Version: 25.6.1
+Version: 25.6.2
 Summary: CLI for managing SimplyBlock cluster
 Home-page: https://www.simplyblock.io/
 Author: Hamdy
sbcli_pre.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sbcli-pre
-Version: 25.6.1
+Version: 25.6.2
 Summary: CLI for managing SimplyBlock cluster
 Home-page: https://www.simplyblock.io/
 Author: Hamdy
simplyblock_core/controllers/health_controller.py
@@ -2,7 +2,7 @@
 import time

 from typing import Any
-from logging import DEBUG, ERROR
+from logging import DEBUG, ERROR, INFO

 import jc
@@ -22,7 +22,7 @@ def check_bdev(name, *, rpc_client=None, bdev_names=None):
         ((bdev_names is not None) and (name in bdev_names)) or
         (rpc_client is not None and (rpc_client.get_bdevs(name) is not None))
     )
-    logger.log(DEBUG if present else ERROR, f"Checking bdev: {name} ... " + ('ok' if present else 'failed'))
+    logger.log(INFO if present else ERROR, f"Checking bdev: {name} ... " + ('ok' if present else 'failed'))
     return present
@@ -38,9 +38,7 @@ def check_subsystem(nqn, *, rpc_client=None, nqns=None, ns_uuid=None):
         logger.error(f"Checking subsystem {nqn} ... not found")
         return False

-    logger.debug(f"Checking subsystem {nqn} ... ok")
-
-    listeners = len(subsystem['listen_addresses'])
+    logger.info(f"Checking subsystem {nqn} ... ok")

     if ns_uuid:
         for ns in subsystem['namespaces']:
@@ -51,9 +49,15 @@ def check_subsystem(nqn, *, rpc_client=None, nqns=None, ns_uuid=None):
         namespaces = 0
     else:
         namespaces = len(subsystem['namespaces'])
+    logger.log(INFO if namespaces else ERROR, f"Checking namespaces: {namespaces} ... " + ('ok' if namespaces else 'not found'))
+
+    listeners = subsystem['listen_addresses']
+    if not listeners:
+        logger.error(f"Checking listener for {nqn} ... not found")
+    else:
+        for listener in listeners:
+            logger.info(f"Checking listener {listener['traddr']}:{listener['trsvcid']} ... ok")

-    logger.log(DEBUG if listeners else ERROR, f"Checking listener: {listeners} ... " + ('ok' if listeners else 'not found'))
-    logger.log(DEBUG if namespaces else ERROR, f"Checking namespaces: {namespaces} ... " + ('ok' if namespaces else 'not found'))
     return listeners and namespaces
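Note: check_subsystem now logs each listener address individually and reports missing namespaces at ERROR level. A minimal standalone sketch of the new iteration, assuming a subsystem dict shaped the way the diff reads it (one entry per listener with 'traddr' and 'trsvcid' keys; the sample values are made up):

import logging

logging.basicConfig(level=logging.INFO, format="%(levelname)s %(message)s")
logger = logging.getLogger("health")

subsystem = {  # hypothetical nvmf_get_subsystems entry
    "nqn": "nqn.2023-02.io.simplyblock:node1:dev:abc",
    "namespaces": [{"uuid": "ns-1"}],
    "listen_addresses": [
        {"traddr": "10.0.0.11", "trsvcid": "4420"},
        {"traddr": "10.0.1.11", "trsvcid": "4420"},
    ],
}

listeners = subsystem["listen_addresses"]
if not listeners:
    logger.error(f"Checking listener for {subsystem['nqn']} ... not found")
else:
    for listener in listeners:
        logger.info(f"Checking listener {listener['traddr']}:{listener['trsvcid']} ... ok")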
@@ -224,8 +228,7 @@ def _check_node_hublvol(node: StorageNode, node_bdev_names=None, node_lvols_nqns
     return passed


-
-def _check_sec_node_hublvol(node: StorageNode, node_bdev=None, node_lvols_nqns=None):
+def _check_sec_node_hublvol(node: StorageNode, node_bdev=None, node_lvols_nqns=None, auto_fix=False):
     db_controller = DBController()
     primary_node = db_controller.get_storage_node_by_id(node.lvstore_stack_secondary_1)
     if primary_node is None:
@@ -257,11 +260,18 @@ def _check_sec_node_hublvol(node: StorageNode, node_bdev=None, node_lvols_nqns=N


     ret = rpc_client.bdev_nvme_controller_list(primary_node.hublvol.bdev_name)
-    if ret:
-        logger.info(f"Checking controller: {primary_node.hublvol.bdev_name} ... ok")
-    else:
-        logger.info(f"Checking controller: {primary_node.hublvol.bdev_name} ... failed")
-        passed = False
+    passed = bool(ret)
+    logger.info(f"Checking controller: {primary_node.hublvol.bdev_name} ... {passed}")
+
+    if not passed and auto_fix and primary_node.lvstore_status == "ready" \
+            and primary_node.status in [StorageNode.STATUS_ONLINE, StorageNode.STATUS_DOWN]:
+        try:
+            node.connect_to_hublvol(primary_node)
+        except Exception as e:
+            logger.error("Error establishing hublvol: %s", e)
+        ret = rpc_client.bdev_nvme_controller_list(primary_node.hublvol.bdev_name)
+        passed = bool(ret)
+        logger.info(f"Checking controller: {primary_node.hublvol.bdev_name} ... {passed}")

     passed &= check_bdev(primary_node.hublvol.get_remote_bdev_name(), bdev_names=node_bdev)
     cl = db_controller.get_cluster_by_id(node.cluster_id)
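Note: the new auto_fix branch is a check, repair once, re-check pattern: if listing the hublvol controller fails while the primary is usable, the secondary calls connect_to_hublvol and runs the same listing again. The shape of the pattern in isolation (check and repair are stand-ins for bdev_nvme_controller_list and connect_to_hublvol):

import logging

logger = logging.getLogger("health")

def check_with_auto_fix(check, repair, auto_fix=False):
    # Run the health check; on failure, optionally repair once and re-check.
    passed = bool(check())
    if not passed and auto_fix:
        try:
            repair()
        except Exception as e:  # repair is best-effort; the re-check decides
            logger.error("Error establishing hublvol: %s", e)
        passed = bool(check())
    return passed

# Toy usage: the first check fails, the repair flips the state, the re-check passes.
state = {"connected": False}
print(check_with_auto_fix(lambda: state["connected"],
                          lambda: state.update(connected=True),
                          auto_fix=True))  # True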
@@ -467,6 +477,7 @@ def check_node(node_id, with_devices=True):
         logger.info("Skipping devices checks because RPC check failed")
     else:
         logger.info(f"Node device count: {len(snode.nvme_devices)}")
+        print("*" * 100)
         for dev in snode.nvme_devices:
             if dev.status in [NVMeDevice.STATUS_ONLINE, NVMeDevice.STATUS_UNAVAILABLE]:
                 ret = check_device(dev.get_id())
@@ -476,15 +487,16 @@ def check_node(node_id, with_devices=True):
         print("*" * 100)

     logger.info(f"Node remote device: {len(snode.remote_devices)}")
+    print("*" * 100)
     rpc_client = RPCClient(
         snode.mgmt_ip, snode.rpc_port,
         snode.rpc_username, snode.rpc_password,
         timeout=5, retry=1)
     for remote_device in snode.remote_devices:
-        node_remote_devices_check &= check_bdev(remote_device.remote_bdev, rpc_client=rpc_client)
+        node_remote_devices_check &= check_remote_device(remote_device.get_id(), snode)
+        print("*" * 100)

     if snode.jm_device:
-        print("*" * 100)
         jm_device = snode.jm_device
         logger.info(f"Node JM: {jm_device.get_id()}")
         ret = check_jm_device(jm_device.get_id())
@@ -498,7 +510,27 @@ def check_node(node_id, with_devices=True):
     print("*" * 100)
     logger.info(f"Node remote JMs: {len(snode.remote_jm_devices)}")
     for remote_device in snode.remote_jm_devices:
-        node_remote_devices_check &= check_bdev(remote_device.remote_bdev, rpc_client=rpc_client)
+
+        name = f'remote_{remote_device.jm_bdev}n1'
+        bdev_info = rpc_client.get_bdevs(name)
+        logger.log(INFO if bdev_info else ERROR,
+                   f"Checking bdev: {name} ... " + ('ok' if bdev_info else 'failed'))
+        node_remote_devices_check &= bool(bdev_info)
+
+        controller_info = rpc_client.bdev_nvme_controller_list(f'remote_{remote_device.jm_bdev}')
+        if controller_info:
+            addr = controller_info[0]['ctrlrs'][0]['trid']['traddr']
+            port = controller_info[0]['ctrlrs'][0]['trid']['trsvcid']
+            logger.info(f"IP Address: {addr}:{port}")
+
+        if remote_device.nvmf_multipath:
+            if controller_info and "alternate_trids" in controller_info[0]['ctrlrs'][0]:
+                addr = controller_info[0]['ctrlrs'][0]['alternate_trids'][0]['traddr']
+                port = controller_info[0]['ctrlrs'][0]['alternate_trids'][0]['trsvcid']
+                logger.info(f"IP Address: {addr}:{port}")
+
+        if bdev_info:
+            logger.info(f"multipath policy: {bdev_info[0]['driver_specific']['mp_policy']}")

     print("*" * 100)
     if snode.lvstore_stack:
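Note: the remote-JM loop now inspects the controller as well as the bdev. A sketch of the address parsing against a hand-built reply shaped like the keys the diff reads from bdev_nvme_controller_list (primary trid, plus alternate_trids when a second path was attached; the addresses are made up):

controller_info = [{  # hypothetical reply
    "ctrlrs": [{
        "trid": {"traddr": "10.0.0.11", "trsvcid": "4420"},
        "alternate_trids": [{"traddr": "10.0.1.11", "trsvcid": "4420"}],
    }],
}]

if controller_info:
    trid = controller_info[0]["ctrlrs"][0]["trid"]
    print(f"IP Address: {trid['traddr']}:{trid['trsvcid']}")
    # Only present when the device was attached over more than one path.
    for alt in controller_info[0]["ctrlrs"][0].get("alternate_trids", []):
        print(f"IP Address: {alt['traddr']}:{alt['trsvcid']}")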
@@ -570,12 +602,11 @@ def check_device(device_id):

         passed &= check_subsystem(device.nvmf_nqn, rpc_client=rpc_client)

-        if device.status == NVMeDevice.STATUS_ONLINE:
-            logger.info("Checking other node's connection to this device...")
-            ret = check_remote_device(device_id)
-            if not ret:
-                logger.warning(f"Remote device {device_id} is not accessible from other nodes")
-                # passed &= ret
+        # if device.status == NVMeDevice.STATUS_ONLINE:
+        #     logger.info("Checking other node's connection to this device...")
+        #     ret = check_remote_device(device_id)
+        #     if not ret:
+        #         logger.warning(f"Remote device {device_id} is not accessible from other nodes")# # passed &= ret

     except Exception as e:
         logger.error(f"Failed to connect to node's SPDK: {e}")
@@ -584,7 +615,7 @@ def check_device(device_id):
     return passed


-def check_remote_device(device_id):
+def check_remote_device(device_id, target_node=None):
     db_controller = DBController()
     device = db_controller.get_storage_device_by_id(device_id)
     if not device:
@@ -596,13 +627,34 @@ def check_remote_device(device_id):
         return False

     result = True
-    for node in db_controller.get_storage_nodes_by_cluster_id(snode.cluster_id):
+    if target_node:
+        nodes = [target_node]
+    else:
+        nodes = db_controller.get_storage_nodes_by_cluster_id(snode.cluster_id)
+    for node in nodes:
         if node.status == StorageNode.STATUS_ONLINE:
             if node.get_id() == snode.get_id():
                 continue
-            logger.info(f"Connecting to node: {node.get_id()}")
+            logger.info(f"Checking device: {device_id}")
             rpc_client = RPCClient(node.mgmt_ip, node.rpc_port, node.rpc_username, node.rpc_password, timeout=5, retry=1)
-            result &= check_bdev(f'remote_{device.alceml_bdev}n1', rpc_client=rpc_client)
+            name = f'remote_{device.alceml_bdev}n1'
+            bdev_info = rpc_client.get_bdevs(name)
+            logger.log(DEBUG if bdev_info else ERROR, f"Checking bdev: {name} ... " + ('ok' if bdev_info else 'failed'))
+            result &= bool(bdev_info)
+            controller_info = rpc_client.bdev_nvme_controller_list(f'remote_{device.alceml_bdev}')
+            if controller_info:
+                addr = controller_info[0]['ctrlrs'][0]['trid']['traddr']
+                port = controller_info[0]['ctrlrs'][0]['trid']['trsvcid']
+                logger.info(f"IP Address: {addr}:{port}")
+
+            if device.nvmf_multipath:
+                if controller_info and "alternate_trids" in controller_info[0]['ctrlrs'][0]:
+                    addr = controller_info[0]['ctrlrs'][0]['alternate_trids'][0]['traddr']
+                    port = controller_info[0]['ctrlrs'][0]['alternate_trids'][0]['trsvcid']
+                    logger.info(f"IP Address: {addr}:{port}")
+
+            if bdev_info:
+                logger.info(f"multipath policy: {bdev_info[0]['driver_specific']['mp_policy']}")

     return result
@@ -725,6 +777,8 @@ def check_jm_device(device_id):
             snode.rpc_username, snode.rpc_password, timeout=5, retry=2)

         passed &= check_bdev(jm_device.jm_bdev, rpc_client=rpc_client)
+        if snode.enable_ha_jm:
+            passed &= check_subsystem(jm_device.nvmf_nqn, rpc_client=rpc_client)

     except Exception as e:
         logger.error(f"Failed to connect to node's SPDK: {e}")
simplyblock_core/env_var
@@ -1,5 +1,5 @@
 SIMPLY_BLOCK_COMMAND_NAME=sbcli-pre
-SIMPLY_BLOCK_VERSION=25.6.1
+SIMPLY_BLOCK_VERSION=25.6.2


 SIMPLY_BLOCK_DOCKER_IMAGE=simplyblock/simplyblock:R25.6-PRE
simplyblock_core/models/nvme_device.py
@@ -46,6 +46,7 @@ class NVMeDevice(BaseModel):
     nvmf_ip: str = ""
     nvmf_nqn: str = ""
     nvmf_port: int = 0
+    nvmf_multipath: bool = False
     overload_percentage: int = 0  # Unused
     partition_jm_bdev: str = ""  # Unused
     partition_jm_size: int = 0  # Unused
simplyblock_core/models/storage_node.py
@@ -241,10 +241,7 @@ class StorageNode(BaseNodeObject):
             ret = rpc_client.bdev_nvme_attach_controller_tcp(
                 primary_node.hublvol.bdev_name, primary_node.hublvol.nqn,
                 ip, primary_node.hublvol.nvmf_port)
-            if ret:
-                remote_bdev = ret[0]
-                break
-            else:
+            if not ret:
                 logger.warning(f'Failed to connect to hublvol on {ip}')

         if not rpc_client.bdev_lvol_set_lvs_opts(
simplyblock_core/rpc_client.py
@@ -562,7 +562,7 @@ class RPCClient:
         # ultra/DISTR_v2/src_code_app_spdk/specs/message_format_rpcs__distrib__v5.txt#L396C1-L396C27
         return self._request("distr_status_events_update", params)

-    def bdev_nvme_attach_controller_tcp(self, name, nqn, ip, port):
+    def bdev_nvme_attach_controller_tcp(self, name, nqn, ip, port, multipath=False):
         params = {
             "name": name,
             "trtype": "tcp",
@@ -571,11 +571,12 @@
             "trsvcid": str(port),
             "subnqn": nqn,
             "fabrics_connect_timeout_us": 100000,
-            "num_io_queues": 128,
-            #"ctrlr_loss_timeout_sec": 3,
-            "multipath":"disable",
-            # "reconnect_delay_sec":1
+            "num_io_queues": 128
         }
+        if multipath:
+            params["multipath"] = "failover"
+        else:
+            params["multipath"] = "disable"
         return self._request("bdev_nvme_attach_controller", params)

     def bdev_nvme_attach_controller_tcp_caching(self, name, nqn, ip, port):
@@ -632,7 +633,7 @@
     def bdev_nvme_set_options(self):
         params = {
             # "action_on_timeout": "abort",
-            "bdev_retry_count": 0,
+            "bdev_retry_count": 1,
             "transport_retry_count": 3,
             "ctrlr_loss_timeout_sec": 1,
             "fast_io_fail_timeout_sec" : 0,
@@ -1071,3 +1072,10 @@
         if ana_state:
             params["ana_state"] = ana_state
         return self._request2("nvmf_subsystem_add_listener", params)
+
+    def bdev_nvme_set_multipath_policy(self, name, policy):  # policy: active_active or active_passive
+        params = {
+            "name": name,
+            "policy": policy,
+        }
+        return self._request("bdev_nvme_set_multipath_policy", params)
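Note: bdev_nvme_set_multipath_policy is a thin wrapper over the SPDK RPC of the same name. A hedged usage sketch with a stub client, just to show the call shape (the bdev name 'remote_jm_xyzn1' is made up, following the <controller>n1 naming visible elsewhere in this diff):

class FakeRPC:
    # Stand-in for RPCClient; only mimics the new method's signature.
    def bdev_nvme_set_multipath_policy(self, name, policy):
        assert policy in ("active_active", "active_passive")
        return {"name": name, "policy": policy}

rpc = FakeRPC()
print(rpc.bdev_nvme_set_multipath_policy("remote_jm_xyzn1", "active_active"))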
simplyblock_core/services/health_check_service.py
@@ -225,7 +225,7 @@ while True:
             if second_node_1 and second_node_1.status == StorageNode.STATUS_ONLINE:
                 lvstore_check &= health_controller._check_node_lvstore(
                     lvstore_stack, second_node_1, auto_fix=True, stack_src_node=snode)
-                lvstore_check &= health_controller._check_sec_node_hublvol(second_node_1)
+                lvstore_check &= health_controller._check_sec_node_hublvol(second_node_1, auto_fix=True)

             lvol_port_check = False
             # if node_api_check:
simplyblock_core/services/storage_node_monitor.py
@@ -301,12 +301,14 @@ while True:
             logger.info(f"Check: node port {snode.mgmt_ip}, {port} ... {ret}")
             node_port_check &= ret

+        node_data_nic_ping_check = False
         for data_nic in snode.data_nics:
             if data_nic.ip4_address:
                 data_ping_check = health_controller._check_node_ping(data_nic.ip4_address)
                 logger.info(f"Check: ping data nic {data_nic.ip4_address} ... {data_ping_check}")
-                if not data_ping_check:
-                    node_port_check = False
+                node_data_nic_ping_check |= data_ping_check
+
+        node_port_check &= node_data_nic_ping_check

         cluster = db.get_cluster_by_id(cluster.get_id())
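Note: the monitor's semantics change from "every data NIC must answer" to "at least one data NIC must answer": per-NIC results are OR-ed into node_data_nic_ping_check, which is then AND-ed into node_port_check. The aggregation in isolation:

def node_ping_ok(ping_results):
    # True if at least one data NIC responded (the new OR semantics).
    node_data_nic_ping_check = False
    for ok in ping_results:
        node_data_nic_ping_check |= ok
    return node_data_nic_ping_check

assert node_ping_ok([False, True])        # one reachable NIC now passes
assert not node_ping_ok([False, False])   # all NICs unreachable still fails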
simplyblock_core/storage_node_ops.py
@@ -112,7 +112,7 @@ def _create_jm_stack_on_raid(rpc_client, jm_nvme_bdevs, snode, after_restart):

     pt_name = ""
     subsystem_nqn = ""
-    IP = ""
+    ip_list = []
     if snode.enable_ha_jm:
         # add pass through
         pt_name = f"{jm_bdev}_PT"
@@ -125,19 +125,24 @@ def _create_jm_stack_on_raid(rpc_client, jm_nvme_bdevs, snode, after_restart):
         logger.info("creating subsystem %s", subsystem_nqn)
         ret = rpc_client.subsystem_create(subsystem_nqn, 'sbcli-cn', jm_bdev)
         logger.info(f"add {pt_name} to subsystem")
-        ret = rpc_client.nvmf_subsystem_add_ns(subsystem_nqn, pt_name)
+        ret = rpc_client.nvmf_subsystem_add_ns(subsystem_nqn, pt_name, alceml_id)
         if not ret:
            logger.error(f"Failed to add: {pt_name} to the subsystem: {subsystem_nqn}")
            return False

-        IP = None
         for iface in snode.data_nics:
             if iface.ip4_address:
                 tr_type = iface.get_transport_type()
                 logger.info("adding listener for %s on IP %s" % (subsystem_nqn, iface.ip4_address))
                 ret = rpc_client.listeners_create(subsystem_nqn, tr_type, iface.ip4_address, snode.nvmf_port)
-                IP = iface.ip4_address
-                break
+                ip_list.append(iface.ip4_address)
+
+        if len(ip_list) > 1:
+            IP = ",".join(ip_list)
+            multipath = True
+        else:
+            IP = ip_list[0]
+            multipath = False

     ret = rpc_client.get_bdevs(raid_bdev)
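Note: instead of keeping only the first data NIC, the JM stack now creates a listener on every NIC and records the result in two fields: nvmf_ip (comma-joined) and nvmf_multipath. The derivation in isolation, with made-up addresses (as in the diff, an empty list would raise IndexError, so at least one data NIC is assumed):

def derive_nvmf_endpoint(ip_list):
    # Collapse the listener IPs into the (nvmf_ip, nvmf_multipath) pair.
    if len(ip_list) > 1:
        return ",".join(ip_list), True
    return ip_list[0], False

print(derive_nvmf_endpoint(["10.0.0.11"]))               # ('10.0.0.11', False)
print(derive_nvmf_endpoint(["10.0.0.11", "10.0.1.11"]))  # ('10.0.0.11,10.0.1.11', True)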
@@ -155,6 +160,7 @@ def _create_jm_stack_on_raid(rpc_client, jm_nvme_bdevs, snode, after_restart):
         'nvmf_nqn': subsystem_nqn,
         'nvmf_ip': IP,
         'nvmf_port': snode.nvmf_port,
+        'nvmf_multipath': multipath,
     })
@@ -192,7 +198,7 @@ def _create_jm_stack_on_device(rpc_client, nvme, snode, after_restart):

     pt_name = ""
     subsystem_nqn = ""
-    IP = ""
+    ip_list = []
     if snode.enable_ha_jm:
         # add pass through
         pt_name = f"{jm_bdev}_PT"
@@ -205,19 +211,24 @@ def _create_jm_stack_on_device(rpc_client, nvme, snode, after_restart):
         logger.info("creating subsystem %s", subsystem_nqn)
         ret = rpc_client.subsystem_create(subsystem_nqn, 'sbcli-cn', jm_bdev)
         logger.info(f"add {pt_name} to subsystem")
-        ret = rpc_client.nvmf_subsystem_add_ns(subsystem_nqn, pt_name)
+        ret = rpc_client.nvmf_subsystem_add_ns(subsystem_nqn, pt_name, alceml_id)
         if not ret:
            logger.error(f"Failed to add: {pt_name} to the subsystem: {subsystem_nqn}")
            return False

-        IP = None
         for iface in snode.data_nics:
             if iface.ip4_address:
                 tr_type = iface.get_transport_type()
                 logger.info("adding listener for %s on IP %s" % (subsystem_nqn, iface.ip4_address))
                 ret = rpc_client.listeners_create(subsystem_nqn, tr_type, iface.ip4_address, snode.nvmf_port)
-                IP = iface.ip4_address
-                break
+                ip_list.append(iface.ip4_address)
+
+        if len(ip_list) > 1:
+            IP = ",".join(ip_list)
+            multipath = True
+        else:
+            IP = ip_list[0]
+            multipath = False

     return JMDevice({
         'uuid': alceml_id,
@@ -235,6 +246,7 @@ def _create_jm_stack_on_device(rpc_client, nvme, snode, after_restart):
         'nvmf_nqn': subsystem_nqn,
         'nvmf_ip': IP,
         'nvmf_port': snode.nvmf_port,
+        'nvmf_multipath': multipath,
     })
@@ -286,21 +298,27 @@ def _create_storage_device_stack(rpc_client, nvme, snode, after_restart):
     subsystem_nqn = snode.subsystem + ":dev:" + alceml_id
     logger.info("creating subsystem %s", subsystem_nqn)
     ret = rpc_client.subsystem_create(subsystem_nqn, 'sbcli-cn', alceml_id)
-    IP = None
+    ip_list = []
     for iface in snode.data_nics:
         if iface.ip4_address:
             tr_type = iface.get_transport_type()
             logger.info("adding listener for %s on IP %s" % (subsystem_nqn, iface.ip4_address))
             ret = rpc_client.listeners_create(subsystem_nqn, tr_type, iface.ip4_address, snode.nvmf_port)
-            IP = iface.ip4_address
-            break
+            ip_list.append(iface.ip4_address)
+
     logger.info(f"add {pt_name} to subsystem")
-    ret = rpc_client.nvmf_subsystem_add_ns(subsystem_nqn, pt_name)
+    ret = rpc_client.nvmf_subsystem_add_ns(subsystem_nqn, pt_name, alceml_id)
     if not ret:
         logger.error(f"Failed to add: {pt_name} to the subsystem: {subsystem_nqn}")
         return None
-    # if snode.enable_test_device:
-    #     nvme.testing_bdev = test_name
+
+    if len(ip_list) > 1:
+        IP = ",".join(ip_list)
+        multipath = True
+    else:
+        IP = ip_list[0]
+        multipath = False
+
     nvme.alceml_bdev = alceml_bdev
     nvme.pt_bdev = pt_name
     nvme.qos_bdev = qos_bdev
@@ -309,6 +327,7 @@ def _create_storage_device_stack(rpc_client, nvme, snode, after_restart):
     nvme.nvmf_ip = IP
     nvme.nvmf_port = snode.nvmf_port
     nvme.io_error = False
+    nvme.nvmf_multipath = multipath
     # if nvme.status != NVMeDevice.STATUS_NEW:
     #     nvme.status = NVMeDevice.STATUS_ONLINE
     return nvme
@@ -560,7 +579,7 @@ def _prepare_cluster_devices_on_restart(snode, clear_data=False):
             logger.info("creating subsystem %s", subsystem_nqn)
             ret = rpc_client.subsystem_create(subsystem_nqn, 'sbcli-cn', jm_bdev)
             logger.info(f"add {pt_name} to subsystem")
-            ret = rpc_client.nvmf_subsystem_add_ns(subsystem_nqn, pt_name)
+            ret = rpc_client.nvmf_subsystem_add_ns(subsystem_nqn, pt_name, snode.get_id())
             if not ret:
                 logger.error(f"Failed to add: {pt_name} to the subsystem: {subsystem_nqn}")
                 return False
@@ -570,7 +589,6 @@ def _prepare_cluster_devices_on_restart(snode, clear_data=False):
                 tr_type = iface.get_transport_type()
                 logger.info("adding listener for %s on IP %s" % (subsystem_nqn, iface.ip4_address))
                 ret = rpc_client.listeners_create(subsystem_nqn, tr_type, iface.ip4_address, snode.nvmf_port)
-                break

     return True
@@ -621,16 +639,23 @@ def _connect_to_remote_devs(this_node, force_conect_restarting_nodes=False):
             # logger.info(f"detaching {name} from {this_node.get_id()}")
             # rpc_client.bdev_nvme_detach_controller(name)
             # time.sleep(1)
-
-            logger.info(f"Connecting {name} to {this_node.get_id()}")
-            rpc_client.bdev_nvme_attach_controller_tcp(name, dev.nvmf_nqn, dev.nvmf_ip, dev.nvmf_port)
+            if dev.nvmf_multipath:
+                for ip in dev.nvmf_ip.split(","):
+                    logger.info(f"Connecting {name} to {this_node.get_id()}")
+                    rpc_client.bdev_nvme_attach_controller_tcp(
+                        name, dev.nvmf_nqn, ip, dev.nvmf_port, multipath=True)
+                rpc_client.bdev_nvme_set_multipath_policy(bdev_name, "active_active")
+            else:
+                logger.info(f"Connecting {name} to {this_node.get_id()}")
+                rpc_client.bdev_nvme_attach_controller_tcp(
+                    name, dev.nvmf_nqn, dev.nvmf_ip, dev.nvmf_port, multipath=False)
             ret = rpc_client.get_bdevs(bdev_name)
             if not ret:
                 logger.error(f"Failed to connect to device: {dev.get_id()}")
                 continue
             dev.remote_bdev = bdev_name
             remote_devices.append(dev)
-        # distr_controller.send_dev_status_event(dev, dev.status, this_node)
+
     return remote_devices
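Note: the consumer side mirrors the model change: a comma-separated nvmf_ip is split, each address is attached under the same controller name with multipath=True, and the policy is switched to active/active before the bdev is verified. A condensed sketch of that flow (rpc_client is assumed to be a connected RPCClient; bdev_name follows the <name>n1 convention used in the diff):

def connect_remote_dev(rpc_client, name, bdev_name, nqn, nvmf_ip, port, multipath):
    # Attach a remote device over one or several TCP paths (sketch).
    if multipath:
        for ip in nvmf_ip.split(","):
            rpc_client.bdev_nvme_attach_controller_tcp(
                name, nqn, ip, port, multipath=True)
        # Spread I/O across the attached paths.
        rpc_client.bdev_nvme_set_multipath_policy(bdev_name, "active_active")
    else:
        rpc_client.bdev_nvme_attach_controller_tcp(
            name, nqn, nvmf_ip, port, multipath=False)
    # Callers verify the attach by looking the bdev up afterwards.
    return rpc_client.get_bdevs(bdev_name)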
@@ -700,14 +725,27 @@ def _connect_to_remote_jm_devs(this_node, jm_ids=None):
                 rpc_client.bdev_nvme_detach_controller(name)
                 time.sleep(1)

-            logger.info(f"Connecting {name} to {this_node.get_id()}")
-            ret = rpc_client.bdev_nvme_attach_controller_tcp(
-                name, org_dev.nvmf_nqn, org_dev.nvmf_ip, org_dev.nvmf_port)
-            if ret:
+            if org_dev.nvmf_multipath:
+                for ip in org_dev.nvmf_ip.split(","):
+                    logger.info(f"Connecting {name} to {this_node.get_id()}")
+                    ret = rpc_client.bdev_nvme_attach_controller_tcp(
+                        name, org_dev.nvmf_nqn, ip, org_dev.nvmf_port, multipath=True)
+                rpc_client.bdev_nvme_set_multipath_policy(bdev_name, "active_active")
+
+                # if ret:
                 org_dev.status = JMDevice.STATUS_ONLINE
+                # else:
+                #     logger.error(f"failed to connect to remote JM {name}")
+                #     org_dev.status = JMDevice.STATUS_UNAVAILABLE
             else:
-                logger.error(f"failed to connect to remote JM {name}")
-                org_dev.status = JMDevice.STATUS_UNAVAILABLE
+                logger.info(f"Connecting {name} to {this_node.get_id()}")
+                ret = rpc_client.bdev_nvme_attach_controller_tcp(
+                    name, org_dev.nvmf_nqn, org_dev.nvmf_ip, org_dev.nvmf_port, multipath=False)
+                if ret:
+                    org_dev.status = JMDevice.STATUS_ONLINE
+                else:
+                    logger.error(f"failed to connect to remote JM {name}")
+                    org_dev.status = JMDevice.STATUS_UNAVAILABLE
             new_devs.append(org_dev)
         else:
             org_dev.status = JMDevice.STATUS_UNAVAILABLE
simplyblock_web/blueprints/web_api_metrics.py
@@ -169,32 +169,32 @@ def get_data():
                     ng[g].labels(cluster=cl.get_id(), snode=node.get_id()).set(node.get_status_code())
                 elif v == "health_check":
                     ng[g].labels(cluster=cl.get_id(), snode=node.get_id()).set(node.health_check)
-
-                for reactor in reactor_data.get("reactors", []):
-                    lcore = reactor.get("lcore")
-                    core_idle = reactor.get("idle", 0)
-                    core_busy = reactor.get("busy", 0)
-                    irq = reactor.get("irq", 0)
-                    sys = reactor.get("sys", 0)
-
-                    thread_names = ", ".join(thread["name"] for thread in reactor.get("lw_threads", []))
-                    if v == "cpu_busy_percentage":
-                        for thread in reactor.get("lw_threads", []):
-                            thread_name = thread.get("name")
-                            thread_id = thread.get("id")
-                            thread_busy = thread_busy_map.get(thread_id, 0)
-
-                            total_core_cycles = core_busy + core_idle
-                            cpu_usage_percent = (thread_busy / total_core_cycles) * 100 if total_core_cycles > 0 else 0
-
-                            ng[g].labels(cluster=cl.get_id(), snode=node.get_id(), thread_name=thread_name).set(cpu_usage_percent)
-
-                    elif v == "cpu_core_utilization":
-
-                        total_cycle = core_busy + irq + sys
-                        total_with_idle = total_cycle + core_idle
-                        core_utilization = (total_cycle / total_with_idle) * 100 if total_with_idle > 0 else 0
-                        ng[g].labels(cluster=cl.get_id(), snode=node.get_id(), core_id=str(lcore), thread_names=thread_names).set(core_utilization)
+                if reactor_data and "reactors" in reactor_data:
+                    for reactor in reactor_data.get("reactors", []):
+                        lcore = reactor.get("lcore")
+                        core_idle = reactor.get("idle", 0)
+                        core_busy = reactor.get("busy", 0)
+                        irq = reactor.get("irq", 0)
+                        sys = reactor.get("sys", 0)
+
+                        thread_names = ", ".join(thread["name"] for thread in reactor.get("lw_threads", []))
+                        if v == "cpu_busy_percentage":
+                            for thread in reactor.get("lw_threads", []):
+                                thread_name = thread.get("name")
+                                thread_id = thread.get("id")
+                                thread_busy = thread_busy_map.get(thread_id, 0)
+
+                                total_core_cycles = core_busy + core_idle
+                                cpu_usage_percent = (thread_busy / total_core_cycles) * 100 if total_core_cycles > 0 else 0
+
+                                ng[g].labels(cluster=cl.get_id(), snode=node.get_id(), thread_name=thread_name).set(cpu_usage_percent)
+
+                        elif v == "cpu_core_utilization":
+
+                            total_cycle = core_busy + irq + sys
+                            total_with_idle = total_cycle + core_idle
+                            core_utilization = (total_cycle / total_with_idle) * 100 if total_with_idle > 0 else 0
+                            ng[g].labels(cluster=cl.get_id(), snode=node.get_id(), core_id=str(lcore), thread_names=thread_names).set(core_utilization)


         for device in node.nvme_devices:
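Note: the reactor block is now gated on reactor_data actually being a mapping with a "reactors" key, so a missing or None reply from SPDK is skipped instead of raising on .get(). The same defensive pattern in isolation:

def iter_reactors(reactor_data):
    # Yield reactor entries only when the RPC actually returned them.
    if reactor_data and "reactors" in reactor_data:
        yield from reactor_data.get("reactors", [])

print(list(iter_reactors(None)))                          # []
print(list(iter_reactors({})))                            # []
print(list(iter_reactors({"reactors": [{"lcore": 0}]})))  # [{'lcore': 0}]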