octavia-13.0.0.0rc1-py3-none-any.whl → octavia-14.0.0-py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (135)
  1. octavia/amphorae/backends/agent/api_server/lvs_listener_base.py +1 -1
  2. octavia/amphorae/backends/agent/api_server/osutils.py +5 -5
  3. octavia/amphorae/backends/agent/api_server/plug.py +3 -2
  4. octavia/amphorae/backends/agent/api_server/rules_schema.py +52 -0
  5. octavia/amphorae/backends/agent/api_server/server.py +28 -1
  6. octavia/amphorae/backends/utils/interface.py +45 -6
  7. octavia/amphorae/backends/utils/interface_file.py +9 -6
  8. octavia/amphorae/backends/utils/nftable_utils.py +125 -0
  9. octavia/amphorae/drivers/driver_base.py +27 -0
  10. octavia/amphorae/drivers/haproxy/rest_api_driver.py +42 -10
  11. octavia/amphorae/drivers/health/heartbeat_udp.py +2 -2
  12. octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py +2 -1
  13. octavia/amphorae/drivers/noop_driver/driver.py +25 -0
  14. octavia/api/app.py +3 -0
  15. octavia/api/common/pagination.py +2 -2
  16. octavia/api/drivers/amphora_driver/flavor_schema.py +6 -1
  17. octavia/api/root_controller.py +4 -1
  18. octavia/api/v2/controllers/health_monitor.py +0 -1
  19. octavia/api/v2/controllers/l7policy.py +0 -1
  20. octavia/api/v2/controllers/l7rule.py +0 -1
  21. octavia/api/v2/controllers/listener.py +0 -1
  22. octavia/api/v2/controllers/load_balancer.py +13 -7
  23. octavia/api/v2/controllers/member.py +6 -3
  24. octavia/api/v2/controllers/pool.py +6 -7
  25. octavia/api/v2/types/load_balancer.py +5 -1
  26. octavia/api/v2/types/pool.py +1 -1
  27. octavia/certificates/common/pkcs12.py +9 -9
  28. octavia/certificates/manager/barbican.py +24 -16
  29. octavia/certificates/manager/castellan_mgr.py +12 -7
  30. octavia/certificates/manager/local.py +4 -4
  31. octavia/certificates/manager/noop.py +106 -0
  32. octavia/cmd/driver_agent.py +1 -1
  33. octavia/cmd/health_checker.py +0 -4
  34. octavia/cmd/health_manager.py +1 -5
  35. octavia/cmd/house_keeping.py +1 -1
  36. octavia/cmd/interface.py +0 -4
  37. octavia/cmd/octavia_worker.py +0 -4
  38. octavia/cmd/prometheus_proxy.py +0 -5
  39. octavia/cmd/status.py +0 -6
  40. octavia/common/base_taskflow.py +1 -1
  41. octavia/common/clients.py +15 -3
  42. octavia/common/config.py +24 -6
  43. octavia/common/constants.py +34 -0
  44. octavia/common/data_models.py +3 -1
  45. octavia/common/exceptions.py +11 -0
  46. octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 +7 -5
  47. octavia/common/keystone.py +7 -7
  48. octavia/common/tls_utils/cert_parser.py +24 -10
  49. octavia/common/utils.py +6 -0
  50. octavia/common/validate.py +2 -2
  51. octavia/compute/drivers/nova_driver.py +23 -5
  52. octavia/controller/worker/task_utils.py +28 -6
  53. octavia/controller/worker/v2/controller_worker.py +49 -15
  54. octavia/controller/worker/v2/flows/amphora_flows.py +120 -21
  55. octavia/controller/worker/v2/flows/flow_utils.py +15 -13
  56. octavia/controller/worker/v2/flows/listener_flows.py +95 -5
  57. octavia/controller/worker/v2/flows/load_balancer_flows.py +74 -30
  58. octavia/controller/worker/v2/taskflow_jobboard_driver.py +17 -1
  59. octavia/controller/worker/v2/tasks/amphora_driver_tasks.py +145 -24
  60. octavia/controller/worker/v2/tasks/compute_tasks.py +1 -1
  61. octavia/controller/worker/v2/tasks/database_tasks.py +72 -41
  62. octavia/controller/worker/v2/tasks/lifecycle_tasks.py +97 -41
  63. octavia/controller/worker/v2/tasks/network_tasks.py +57 -60
  64. octavia/controller/worker/v2/tasks/shim_tasks.py +28 -0
  65. octavia/db/migration/alembic_migrations/versions/55874a4ceed6_add_l7policy_action_redirect_prefix.py +1 -1
  66. octavia/db/migration/alembic_migrations/versions/5a3ee5472c31_add_cert_expiration__infor_in_amphora_table.py +1 -1
  67. octavia/db/migration/alembic_migrations/versions/6742ca1b27c2_add_l7policy_redirect_http_code.py +1 -1
  68. octavia/db/migration/alembic_migrations/versions/db2a73e82626_add_vnic_type_for_vip.py +36 -0
  69. octavia/db/models.py +1 -0
  70. octavia/db/prepare.py +1 -1
  71. octavia/db/repositories.py +53 -34
  72. octavia/distributor/drivers/driver_base.py +1 -1
  73. octavia/network/base.py +3 -16
  74. octavia/network/data_models.py +4 -1
  75. octavia/network/drivers/neutron/allowed_address_pairs.py +27 -26
  76. octavia/network/drivers/noop_driver/driver.py +10 -23
  77. octavia/tests/common/sample_certs.py +115 -0
  78. octavia/tests/common/sample_haproxy_prometheus +1 -1
  79. octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py +37 -0
  80. octavia/tests/functional/api/test_healthcheck.py +2 -2
  81. octavia/tests/functional/api/v2/base.py +1 -1
  82. octavia/tests/functional/api/v2/test_listener.py +45 -0
  83. octavia/tests/functional/api/v2/test_load_balancer.py +17 -0
  84. octavia/tests/functional/db/base.py +9 -0
  85. octavia/tests/functional/db/test_models.py +2 -1
  86. octavia/tests/functional/db/test_repositories.py +55 -99
  87. octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py +4 -2
  88. octavia/tests/unit/amphorae/backends/utils/test_interface.py +201 -1
  89. octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py +1 -1
  90. octavia/tests/unit/amphorae/backends/utils/test_nftable_utils.py +194 -0
  91. octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver.py +27 -5
  92. octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py +15 -2
  93. octavia/tests/unit/amphorae/drivers/keepalived/test_vrrp_rest_driver.py +17 -0
  94. octavia/tests/unit/amphorae/drivers/noop_driver/test_driver.py +2 -1
  95. octavia/tests/unit/api/v2/types/test_pool.py +71 -0
  96. octavia/tests/unit/certificates/manager/test_barbican.py +3 -3
  97. octavia/tests/unit/certificates/manager/test_noop.py +53 -0
  98. octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py +16 -17
  99. octavia/tests/unit/common/sample_configs/sample_configs_combined.py +5 -3
  100. octavia/tests/unit/common/test_config.py +35 -0
  101. octavia/tests/unit/common/test_keystone.py +32 -0
  102. octavia/tests/unit/common/test_utils.py +39 -0
  103. octavia/tests/unit/compute/drivers/test_nova_driver.py +22 -0
  104. octavia/tests/unit/controller/worker/test_task_utils.py +58 -2
  105. octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py +28 -5
  106. octavia/tests/unit/controller/worker/v2/flows/test_listener_flows.py +64 -16
  107. octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py +49 -9
  108. octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py +265 -17
  109. octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py +101 -1
  110. octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py +19 -19
  111. octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py +105 -42
  112. octavia/tests/unit/controller/worker/v2/tasks/test_shim_tasks.py +33 -0
  113. octavia/tests/unit/controller/worker/v2/test_controller_worker.py +85 -42
  114. octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py +48 -51
  115. octavia/tests/unit/network/drivers/neutron/test_utils.py +2 -0
  116. octavia/tests/unit/network/drivers/noop_driver/test_driver.py +0 -7
  117. {octavia-13.0.0.0rc1.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/README.rst +6 -1
  118. {octavia-13.0.0.0rc1.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/diskimage-create.sh +10 -4
  119. {octavia-13.0.0.0rc1.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/requirements.txt +0 -2
  120. {octavia-13.0.0.0rc1.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/tox.ini +30 -13
  121. {octavia-13.0.0.0rc1.dist-info → octavia-14.0.0.dist-info}/AUTHORS +5 -0
  122. {octavia-13.0.0.0rc1.dist-info → octavia-14.0.0.dist-info}/METADATA +6 -6
  123. {octavia-13.0.0.0rc1.dist-info → octavia-14.0.0.dist-info}/RECORD +134 -126
  124. {octavia-13.0.0.0rc1.dist-info → octavia-14.0.0.dist-info}/entry_points.txt +1 -1
  125. octavia-14.0.0.dist-info/pbr.json +1 -0
  126. octavia-13.0.0.0rc1.dist-info/pbr.json +0 -1
  127. {octavia-13.0.0.0rc1.data → octavia-14.0.0.data}/data/share/octavia/LICENSE +0 -0
  128. {octavia-13.0.0.0rc1.data → octavia-14.0.0.data}/data/share/octavia/README.rst +0 -0
  129. {octavia-13.0.0.0rc1.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/image-tests.sh +0 -0
  130. {octavia-13.0.0.0rc1.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/test-requirements.txt +0 -0
  131. {octavia-13.0.0.0rc1.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/version.txt +0 -0
  132. {octavia-13.0.0.0rc1.data → octavia-14.0.0.data}/scripts/octavia-wsgi +0 -0
  133. {octavia-13.0.0.0rc1.dist-info → octavia-14.0.0.dist-info}/LICENSE +0 -0
  134. {octavia-13.0.0.0rc1.dist-info → octavia-14.0.0.dist-info}/WHEEL +0 -0
  135. {octavia-13.0.0.0rc1.dist-info → octavia-14.0.0.dist-info}/top_level.txt +0 -0
octavia/controller/worker/v2/tasks/database_tasks.py

@@ -14,6 +14,7 @@
 #
 
 from cryptography import fernet
+from octavia_lib.common import constants as lib_consts
 from oslo_config import cfg
 from oslo_db import exception as odb_exceptions
 from oslo_log import log as logging
@@ -27,6 +28,7 @@ from taskflow.types import failure
 from octavia.api.drivers import utils as provider_utils
 from octavia.common import constants
 from octavia.common import data_models
+from octavia.common import exceptions
 from octavia.common.tls_utils import cert_parser
 from octavia.common import utils
 from octavia.controller.worker import task_utils as task_utilities
@@ -2621,15 +2623,13 @@ class DecrementHealthMonitorQuota(BaseDatabaseTask):
 
             try:
                 session = db_apis.get_session()
-                lock_session = db_apis.get_session()
                 try:
                     self.repos.check_quota_met(session,
-                                               lock_session,
                                                data_models.HealthMonitor,
                                                project_id)
-                    lock_session.commit()
+                    session.commit()
                 except Exception:
-                    lock_session.rollback()
+                    session.rollback()
             except Exception:
                 # Don't fail the revert flow
                 pass
@@ -2679,15 +2679,13 @@ class DecrementListenerQuota(BaseDatabaseTask):
 
             try:
                 session = db_apis.get_session()
-                lock_session = db_apis.get_session()
                 try:
                     self.repos.check_quota_met(session,
-                                               lock_session,
                                                data_models.Listener,
                                                project_id)
-                    lock_session.commit()
+                    session.commit()
                 except Exception:
-                    lock_session.rollback()
+                    session.rollback()
             except Exception:
                 # Don't fail the revert flow
                 pass
@@ -2739,15 +2737,13 @@ class DecrementLoadBalancerQuota(BaseDatabaseTask):
 
             try:
                 session = db_apis.get_session()
-                lock_session = db_apis.get_session()
                 try:
                     self.repos.check_quota_met(session,
-                                               lock_session,
                                                data_models.LoadBalancer,
                                                project_id)
-                    lock_session.commit()
+                    session.commit()
                 except Exception:
-                    lock_session.rollback()
+                    session.rollback()
             except Exception:
                 # Don't fail the revert flow
                 pass
@@ -2798,15 +2794,13 @@ class DecrementMemberQuota(BaseDatabaseTask):
 
             try:
                 session = db_apis.get_session()
-                lock_session = db_apis.get_session()
                 try:
                     self.repos.check_quota_met(session,
-                                               lock_session,
                                                data_models.Member,
                                                project_id)
-                    lock_session.commit()
+                    session.commit()
                 except Exception:
-                    lock_session.rollback()
+                    session.rollback()
             except Exception:
                 # Don't fail the revert flow
                 pass
@@ -2871,42 +2865,38 @@ class DecrementPoolQuota(BaseDatabaseTask):
             # in case other quota actions have occurred
             try:
                 session = db_apis.get_session()
-                lock_session = db_apis.get_session()
                 try:
                     self.repos.check_quota_met(session,
-                                               lock_session,
                                                data_models.Pool,
                                                project_id)
-                    lock_session.commit()
+                    session.commit()
                 except Exception:
-                    lock_session.rollback()
+                    session.rollback()
 
                 # Attempt to increment back the health monitor quota
                 if pool_child_count['HM'] > 0:
-                    lock_session = db_apis.get_session()
+                    session = db_apis.get_session()
                     try:
                         self.repos.check_quota_met(session,
-                                                   lock_session,
                                                    data_models.HealthMonitor,
                                                    project_id)
-                        lock_session.commit()
+                        session.commit()
                     except Exception:
-                        lock_session.rollback()
+                        session.rollback()
 
                 # Attempt to increment back the member quota
                 # This is separate calls to maximize the correction
                 # should other factors have increased the in use quota
                 # before this point in the revert flow
                 for i in range(pool_child_count['member']):
-                    lock_session = db_apis.get_session()
+                    session = db_apis.get_session()
                     try:
                         self.repos.check_quota_met(session,
-                                                   lock_session,
                                                    data_models.Member,
                                                    project_id)
-                        lock_session.commit()
+                        session.commit()
                     except Exception:
-                        lock_session.rollback()
+                        session.rollback()
             except Exception:
                 # Don't fail the revert flow
                 pass
@@ -2954,7 +2944,8 @@ class DecrementL7policyQuota(BaseDatabaseTask):
                                        data_models.L7Policy,
                                        l7policy[constants.PROJECT_ID])
             db_l7policy = self.l7policy_repo.get(
-                db_apis.get_session(), id=l7policy[constants.L7POLICY_ID])
+                lock_session,
+                id=l7policy[constants.L7POLICY_ID])
 
             if db_l7policy and db_l7policy.l7rules:
                 self.repos.decrement_quota(lock_session,
@@ -2982,28 +2973,26 @@ class DecrementL7policyQuota(BaseDatabaseTask):
         if not isinstance(result, failure.Failure):
             try:
                 session = db_apis.get_session()
-                lock_session = db_apis.get_session()
                 try:
                     self.repos.check_quota_met(session,
-                                               lock_session,
                                                data_models.L7Policy,
                                                l7policy[constants.PROJECT_ID])
-                    lock_session.commit()
+                    session.commit()
                 except Exception:
-                    lock_session.rollback()
+                    session.rollback()
                 db_l7policy = self.l7policy_repo.get(
                     session, id=l7policy[constants.L7POLICY_ID])
                 if db_l7policy:
                     # Attempt to increment back the L7Rule quota
                     for i in range(len(db_l7policy.l7rules)):
-                        lock_session = db_apis.get_session()
+                        session = db_apis.get_session()
                         try:
                             self.repos.check_quota_met(
-                                session, lock_session, data_models.L7Rule,
+                                session, data_models.L7Rule,
                                 db_l7policy.project_id)
-                            lock_session.commit()
+                            session.commit()
                         except Exception:
-                            lock_session.rollback()
+                            session.rollback()
             except Exception:
                 # Don't fail the revert flow
                 pass
@@ -3054,15 +3043,13 @@ class DecrementL7ruleQuota(BaseDatabaseTask):
 
             try:
                 session = db_apis.get_session()
-                lock_session = db_apis.get_session()
                 try:
                     self.repos.check_quota_met(session,
-                                               lock_session,
                                                data_models.L7Rule,
                                                l7rule[constants.PROJECT_ID])
-                    lock_session.commit()
+                    session.commit()
                 except Exception:
-                    lock_session.rollback()
+                    session.rollback()
             except Exception:
                 # Don't fail the revert flow
                 pass
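All of the Decrement*Quota revert paths above receive the same treatment: the separate lock_session is dropped and check_quota_met() is now called with a single session that is committed or rolled back directly. A condensed sketch of the new revert-side pattern, using the health monitor case (self.repos, db_apis, data_models and failure are the same objects the BaseDatabaseTask subclasses in this diff already use):

    def revert(self, project_id, result, *args, **kwargs):
        # Only re-increment the quota if this task was not the failure.
        if not isinstance(result, failure.Failure):
            try:
                session = db_apis.get_session()
                try:
                    # One session now acts as both query and lock session.
                    self.repos.check_quota_met(session,
                                               data_models.HealthMonitor,
                                               project_id)
                    session.commit()
                except Exception:
                    session.rollback()
            except Exception:
                # Never fail the revert flow over quota bookkeeping.
                pass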
@@ -3088,3 +3075,47 @@ class UpdatePoolMembersOperatingStatusInDB(BaseDatabaseTask):
         with db_apis.session().begin() as session:
             self.member_repo.update_pool_members(
                 session, pool_id, operating_status=operating_status)
+
+
+class GetAmphoraFirewallRules(BaseDatabaseTask):
+    """Task to build firewall rules for the amphora."""
+
+    def execute(self, amphorae, amphora_index, amphorae_network_config):
+        this_amp_id = amphorae[amphora_index][constants.ID]
+        amp_net_config = amphorae_network_config[this_amp_id]
+
+        lb_dict = amp_net_config[constants.AMPHORA]['load_balancer']
+        vip_dict = lb_dict[constants.VIP]
+
+        if vip_dict[constants.VNIC_TYPE] != constants.VNIC_TYPE_DIRECT:
+            LOG.debug('Load balancer VIP port is not SR-IOV enabled. Skipping '
+                      'firewall rules update.')
+            return [{'non-sriov-vip': True}]
+
+        session = db_apis.get_session()
+        with session.begin():
+            rules = self.listener_repo.get_port_protocol_cidr_for_lb(
+                session,
+                amp_net_config[constants.AMPHORA][constants.LOAD_BALANCER_ID])
+
+        # If we are act/stdby, inject the VRRP firewall rule(s)
+        if lb_dict[constants.TOPOLOGY] == constants.TOPOLOGY_ACTIVE_STANDBY:
+            for amp_cfg in lb_dict[constants.AMPHORAE]:
+                if (amp_cfg[constants.ID] != this_amp_id and
+                        amp_cfg[constants.STATUS] ==
+                        lib_consts.AMPHORA_ALLOCATED):
+                    vrrp_ip = amp_cfg[constants.VRRP_IP]
+                    vrrp_ip_ver = utils.ip_version(vrrp_ip)
+
+                    if vrrp_ip_ver == 4:
+                        vrrp_ip_cidr = f'{vrrp_ip}/32'
+                    elif vrrp_ip_ver == 6:
+                        vrrp_ip_cidr = f'{vrrp_ip}/128'
+                    else:
+                        raise exceptions.InvalidIPAddress(ip_addr=vrrp_ip)
+
+                    rules.append({constants.PROTOCOL: constants.VRRP,
+                                  constants.CIDR: vrrp_ip_cidr,
+                                  constants.PORT: 112})
+        LOG.debug('Amphora %s SR-IOV firewall rules: %s', this_amp_id, rules)
+        return rules
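Before appending the protocol-112 (VRRP) entry, GetAmphoraFirewallRules converts the peer amphora's VRRP address into a single-host CIDR. The same decision is shown standalone below using only the standard-library ipaddress module (octavia's utils.ip_version is assumed to report the address family in the same way):

    import ipaddress

    def vrrp_host_cidr(ip_addr: str) -> str:
        # Single-host prefix for the peer's VRRP address: /32 for IPv4,
        # /128 for IPv6; anything unparsable raises ValueError.
        version = ipaddress.ip_address(ip_addr).version
        if version == 4:
            return f'{ip_addr}/32'
        if version == 6:
            return f'{ip_addr}/128'
        raise ValueError(f'Unsupported IP version for {ip_addr}')

    # vrrp_host_cidr('192.0.2.11')  -> '192.0.2.11/32'
    # vrrp_host_cidr('2001:db8::5') -> '2001:db8::5/128'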
octavia/controller/worker/v2/tasks/lifecycle_tasks.py

@@ -19,7 +19,7 @@ from octavia.controller.worker import task_utils as task_utilities
 
 
 class BaseLifecycleTask(task.Task):
-    """Base task to instansiate common classes."""
+    """Base task to instantiate common classes."""
 
     def __init__(self, **kwargs):
         self.task_utils = task_utilities.TaskUtils()
@@ -54,15 +54,22 @@ class HealthMonitorToErrorOnRevertTask(BaseLifecycleTask):
         pass
 
     def revert(self, health_mon, listeners, loadbalancer, *args, **kwargs):
-        self.task_utils.mark_health_mon_prov_status_error(
-            health_mon[constants.HEALTHMONITOR_ID])
-        self.task_utils.mark_pool_prov_status_active(
-            health_mon[constants.POOL_ID])
+        try:
+            self.task_utils.mark_health_mon_prov_status_error(
+                health_mon[constants.HEALTHMONITOR_ID])
+            self.task_utils.mark_pool_prov_status_active(
+                health_mon[constants.POOL_ID])
+            for listener in listeners:
+                self.task_utils.mark_listener_prov_status_active(
+                    listener[constants.LISTENER_ID])
+        except Exception:
+            # Catching and skipping, errors are already reported by task_utils
+            # and we want to ensure that mark_loadbalancer_prov_status_active
+            # is called to unlock the LB (it will pass or it will fail after a
+            # very long timeout)
+            pass
         self.task_utils.mark_loadbalancer_prov_status_active(
             loadbalancer[constants.LOADBALANCER_ID])
-        for listener in listeners:
-            self.task_utils.mark_listener_prov_status_active(
-                listener[constants.LISTENER_ID])
 
 
 class L7PolicyToErrorOnRevertTask(BaseLifecycleTask):
@@ -72,12 +79,19 @@ class L7PolicyToErrorOnRevertTask(BaseLifecycleTask):
         pass
 
     def revert(self, l7policy, listeners, loadbalancer_id, *args, **kwargs):
-        self.task_utils.mark_l7policy_prov_status_error(
-            l7policy[constants.L7POLICY_ID])
+        try:
+            self.task_utils.mark_l7policy_prov_status_error(
+                l7policy[constants.L7POLICY_ID])
+            for listener in listeners:
+                self.task_utils.mark_listener_prov_status_active(
+                    listener[constants.LISTENER_ID])
+        except Exception:
+            # Catching and skipping, errors are already reported by task_utils
+            # and we want to ensure that mark_loadbalancer_prov_status_active
+            # is called to unlock the LB (it will pass or it will fail after a
+            # very long timeout)
+            pass
         self.task_utils.mark_loadbalancer_prov_status_active(loadbalancer_id)
-        for listener in listeners:
-            self.task_utils.mark_listener_prov_status_active(
-                listener[constants.LISTENER_ID])
 
 
 class L7RuleToErrorOnRevertTask(BaseLifecycleTask):
@@ -88,14 +102,21 @@ class L7RuleToErrorOnRevertTask(BaseLifecycleTask):
 
     def revert(self, l7rule, l7policy_id, listeners, loadbalancer_id, *args,
                **kwargs):
-        self.task_utils.mark_l7rule_prov_status_error(
-            l7rule[constants.L7RULE_ID])
-        self.task_utils.mark_l7policy_prov_status_active(l7policy_id)
+        try:
+            self.task_utils.mark_l7rule_prov_status_error(
+                l7rule[constants.L7RULE_ID])
+            self.task_utils.mark_l7policy_prov_status_active(l7policy_id)
+            for listener in listeners:
+                self.task_utils.mark_listener_prov_status_active(
+                    listener[constants.LISTENER_ID])
+        except Exception:
+            # Catching and skipping, errors are already reported by task_utils
+            # and we want to ensure that mark_loadbalancer_prov_status_active
+            # is called to unlock the LB (it will pass or it will fail after a
+            # very long timeout)
+            pass
         self.task_utils.mark_loadbalancer_prov_status_active(
             loadbalancer_id)
-        for listener in listeners:
-            self.task_utils.mark_listener_prov_status_active(
-                listener[constants.LISTENER_ID])
 
 
 class ListenerToErrorOnRevertTask(BaseLifecycleTask):
@@ -105,8 +126,15 @@ class ListenerToErrorOnRevertTask(BaseLifecycleTask):
         pass
 
     def revert(self, listener, *args, **kwargs):
-        self.task_utils.mark_listener_prov_status_error(
-            listener[constants.LISTENER_ID])
+        try:
+            self.task_utils.mark_listener_prov_status_error(
+                listener[constants.LISTENER_ID])
+        except Exception:
+            # Catching and skipping, errors are already reported by task_utils
+            # and we want to ensure that mark_loadbalancer_prov_status_active
+            # is called to unlock the LB (it will pass or it will fail after a
+            # very long timeout)
+            pass
         self.task_utils.mark_loadbalancer_prov_status_active(
             listener[constants.LOADBALANCER_ID])
 
@@ -118,9 +146,16 @@ class ListenersToErrorOnRevertTask(BaseLifecycleTask):
         pass
 
     def revert(self, listeners, *args, **kwargs):
-        for listener in listeners:
-            self.task_utils.mark_listener_prov_status_error(
-                listener[constants.LISTENER_ID])
+        try:
+            for listener in listeners:
+                self.task_utils.mark_listener_prov_status_error(
+                    listener[constants.LISTENER_ID])
+        except Exception:
+            # Catching and skipping, errors are already reported by task_utils
+            # and we want to ensure that mark_loadbalancer_prov_status_active
+            # is called to unlock the LB (it will pass or it will fail after a
+            # very long timeout)
+            pass
         self.task_utils.mark_loadbalancer_prov_status_active(
             listeners[0][constants.LOADBALANCER_ID])
 
@@ -154,12 +189,19 @@ class MemberToErrorOnRevertTask(BaseLifecycleTask):
 
     def revert(self, member, listeners, loadbalancer, pool_id, *args,
                **kwargs):
-        self.task_utils.mark_member_prov_status_error(
-            member[constants.MEMBER_ID])
-        for listener in listeners:
-            self.task_utils.mark_listener_prov_status_active(
-                listener[constants.LISTENER_ID])
-        self.task_utils.mark_pool_prov_status_active(pool_id)
+        try:
+            self.task_utils.mark_member_prov_status_error(
+                member[constants.MEMBER_ID])
+            for listener in listeners:
+                self.task_utils.mark_listener_prov_status_active(
+                    listener[constants.LISTENER_ID])
+            self.task_utils.mark_pool_prov_status_active(pool_id)
+        except Exception:
+            # Catching and skipping, errors are already reported by task_utils
+            # and we want to ensure that mark_loadbalancer_prov_status_active
+            # is called to unlock the LB (it will pass or it will fail after a
+            # very long timeout)
+            pass
         self.task_utils.mark_loadbalancer_prov_status_active(
             loadbalancer[constants.LOADBALANCER_ID])
 
@@ -172,13 +214,20 @@ class MembersToErrorOnRevertTask(BaseLifecycleTask):
 
     def revert(self, members, listeners, loadbalancer, pool_id, *args,
                **kwargs):
-        for m in members:
-            self.task_utils.mark_member_prov_status_error(
-                m[constants.MEMBER_ID])
-        for listener in listeners:
-            self.task_utils.mark_listener_prov_status_active(
-                listener[constants.LISTENER_ID])
-        self.task_utils.mark_pool_prov_status_active(pool_id)
+        try:
+            for m in members:
+                self.task_utils.mark_member_prov_status_error(
+                    m[constants.MEMBER_ID])
+            for listener in listeners:
+                self.task_utils.mark_listener_prov_status_active(
+                    listener[constants.LISTENER_ID])
+            self.task_utils.mark_pool_prov_status_active(pool_id)
+        except Exception:
+            # Catching and skipping, errors are already reported by task_utils
+            # and we want to ensure that mark_loadbalancer_prov_status_active
+            # is called to unlock the LB (it will pass or it will fail after a
+            # very long timeout)
+            pass
         self.task_utils.mark_loadbalancer_prov_status_active(
             loadbalancer[constants.LOADBALANCER_ID])
 
@@ -190,9 +239,16 @@ class PoolToErrorOnRevertTask(BaseLifecycleTask):
         pass
 
     def revert(self, pool_id, listeners, loadbalancer, *args, **kwargs):
-        self.task_utils.mark_pool_prov_status_error(pool_id)
+        try:
+            self.task_utils.mark_pool_prov_status_error(pool_id)
+            for listener in listeners:
+                self.task_utils.mark_listener_prov_status_active(
+                    listener[constants.LISTENER_ID])
+        except Exception:
+            # Catching and skipping, errors are already reported by task_utils
+            # and we want to ensure that mark_loadbalancer_prov_status_active
+            # is called to unlock the LB (it will pass or it will fail after a
+            # very long timeout)
+            pass
         self.task_utils.mark_loadbalancer_prov_status_active(
             loadbalancer[constants.LOADBALANCER_ID])
-        for listener in listeners:
-            self.task_utils.mark_listener_prov_status_active(
-                listener[constants.LISTENER_ID])
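Every *ToErrorOnRevertTask hunk above changes in the same way: the per-object status updates move inside a try/except so that a failure there can no longer skip the final call that returns the load balancer to ACTIVE and releases it. Reduced to a skeleton (shown with PoolToErrorOnRevertTask's signature; the other tasks differ only in which object they mark as ERROR):

    def revert(self, pool_id, listeners, loadbalancer, *args, **kwargs):
        try:
            self.task_utils.mark_pool_prov_status_error(pool_id)
            for listener in listeners:
                self.task_utils.mark_listener_prov_status_active(
                    listener[constants.LISTENER_ID])
        except Exception:
            # task_utils already logs the failure; fall through so the load
            # balancer is still unlocked below.
            pass
        # This call must always run, otherwise the load balancer stays stuck
        # in a PENDING_* provisioning status.
        self.task_utils.mark_loadbalancer_prov_status_active(
            loadbalancer[constants.LOADBALANCER_ID])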
octavia/controller/worker/v2/tasks/network_tasks.py

@@ -478,53 +478,6 @@ class HandleNetworkDeltas(BaseNetworkTask):
                 LOG.exception("Unable to delete port %s", port_id)
 
 
-class PlugVIP(BaseNetworkTask):
-    """Task to plumb a VIP."""
-
-    def execute(self, loadbalancer):
-        """Plumb a vip to an amphora."""
-
-        LOG.debug("Plumbing VIP for loadbalancer id: %s",
-                  loadbalancer[constants.LOADBALANCER_ID])
-        session = db_apis.get_session()
-        with session.begin():
-            db_lb = self.loadbalancer_repo.get(
-                session,
-                id=loadbalancer[constants.LOADBALANCER_ID])
-        amps_data = self.network_driver.plug_vip(db_lb,
-                                                 db_lb.vip)
-        return [amp.to_dict() for amp in amps_data]
-
-    def revert(self, result, loadbalancer, *args, **kwargs):
-        """Handle a failure to plumb a vip."""
-
-        if isinstance(result, failure.Failure):
-            return
-        LOG.warning("Unable to plug VIP for loadbalancer id %s",
-                    loadbalancer[constants.LOADBALANCER_ID])
-
-        session = db_apis.get_session()
-        with session.begin():
-            db_lb = self.loadbalancer_repo.get(
-                session,
-                id=loadbalancer[constants.LOADBALANCER_ID])
-        try:
-            # Make sure we have the current port IDs for cleanup
-            for amp_data in result:
-                for amphora in filter(
-                        # pylint: disable=cell-var-from-loop
-                        lambda amp: amp.id == amp_data['id'],
-                        db_lb.amphorae):
-                    amphora.vrrp_port_id = amp_data['vrrp_port_id']
-                    amphora.ha_port_id = amp_data['ha_port_id']
-
-            self.network_driver.unplug_vip(db_lb, db_lb.vip)
-        except Exception as e:
-            LOG.error("Failed to unplug VIP. Resources may still "
-                      "be in use from vip: %(vip)s due to error: %(except)s",
-                      {'vip': loadbalancer['vip_address'], 'except': str(e)})
-
-
 class UpdateVIPSecurityGroup(BaseNetworkTask):
     """Task to setup SG for LB."""
 
@@ -791,19 +744,6 @@ class GetAmphoraeNetworkConfigs(BaseNetworkTask):
         return provider_dict
 
 
-class FailoverPreparationForAmphora(BaseNetworkTask):
-    """Task to prepare an amphora for failover."""
-
-    def execute(self, amphora):
-        session = db_apis.get_session()
-        with session.begin():
-            db_amp = self.amphora_repo.get(session,
-                                           id=amphora[constants.ID])
-        LOG.debug("Prepare amphora %s for failover.", amphora[constants.ID])
-
-        self.network_driver.failover_preparation(db_amp)
-
-
 class RetrievePortIDsOnAmphoraExceptLBNetwork(BaseNetworkTask):
     """Task retrieving all the port ids on an amphora, except lb network."""
 
@@ -1098,3 +1038,60 @@ class GetVIPSecurityGroupID(BaseNetworkTask):
                 else:
                     ctxt.reraise = False
         return None
+
+
+class CreateSRIOVBasePort(BaseNetworkTask):
+    """Task to create a SRIOV base port for an amphora."""
+
+    @tenacity.retry(retry=tenacity.retry_if_exception_type(),
+                    stop=tenacity.stop_after_attempt(
+                        CONF.networking.max_retries),
+                    wait=tenacity.wait_exponential(
+                        multiplier=CONF.networking.retry_backoff,
+                        min=CONF.networking.retry_interval,
+                        max=CONF.networking.retry_max), reraise=True)
+    def execute(self, loadbalancer, amphora, subnet):
+        session = db_apis.get_session()
+        with session.begin():
+            db_lb = self.loadbalancer_repo.get(
+                session, id=loadbalancer[constants.LOADBALANCER_ID])
+        port_name = constants.AMP_BASE_PORT_PREFIX + amphora[constants.ID]
+        fixed_ips = [{constants.SUBNET_ID: subnet[constants.ID]}]
+        addl_vips = [obj.ip_address for obj in db_lb.additional_vips]
+        addl_vips.append(loadbalancer[constants.VIP_ADDRESS])
+        port = self.network_driver.create_port(
+            loadbalancer[constants.VIP_NETWORK_ID],
+            name=port_name, fixed_ips=fixed_ips,
+            secondary_ips=addl_vips,
+            qos_policy_id=loadbalancer[constants.VIP_QOS_POLICY_ID],
+            vnic_type=constants.VNIC_TYPE_DIRECT)
+        LOG.info('Created port %s with ID %s for amphora %s',
+                 port_name, port.id, amphora[constants.ID])
+        return port.to_dict(recurse=True)
+
+    def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs):
+        if isinstance(result, failure.Failure):
+            return
+        try:
+            port_name = constants.AMP_BASE_PORT_PREFIX + amphora['id']
+            self.network_driver.delete_port(result[constants.ID])
+            LOG.info('Deleted port %s with ID %s for amphora %s due to a '
+                     'revert.', port_name, result[constants.ID], amphora['id'])
+        except Exception as e:
+            LOG.error('Failed to delete port %s. Resources may still be in '
+                      'use for a port intended for amphora %s due to error '
+                      '%s. Search for a port named %s',
+                      result, amphora['id'], str(e), port_name)
+
+
+class BuildAMPData(BaseNetworkTask):
+    """Glue task to store the AMP_DATA dict from netork port information."""
+
+    def execute(self, loadbalancer, amphora, port_data):
+        amphora[constants.HA_IP] = loadbalancer[constants.VIP_ADDRESS]
+        amphora[constants.HA_PORT_ID] = loadbalancer[constants.VIP_PORT_ID]
+        amphora[constants.VRRP_ID] = 1
+        amphora[constants.VRRP_PORT_ID] = port_data[constants.ID]
+        amphora[constants.VRRP_IP] = port_data[
+            constants.FIXED_IPS][0][constants.IP_ADDRESS]
+        return amphora
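For orientation, the amphora dict that BuildAMPData returns carries the VIP addressing from the load balancer and the VRRP addressing from the newly created SR-IOV base port. An illustrative result with invented IDs and addresses (the string keys are the values the constants above are assumed to resolve to):

    # Hypothetical BuildAMPData.execute() result; every ID and address here
    # is made up, and the key strings are assumed constant values.
    amphora = {
        'id': 'aa11aa11-aa11-aa11-aa11-aa11aa11aa11',
        'ha_ip': '203.0.113.10',    # load balancer VIP address
        'ha_port_id': 'bb22bb22-bb22-bb22-bb22-bb22bb22bb22',   # VIP port
        'vrrp_id': 1,
        'vrrp_port_id': 'cc33cc33-cc33-cc33-cc33-cc33cc33cc33',  # new base port
        'vrrp_ip': '203.0.113.11',  # first fixed IP on the new port
    }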
octavia/controller/worker/v2/tasks/shim_tasks.py

@@ -0,0 +1,28 @@
+# Copyright 2024 Red Hat
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+from taskflow import task
+
+from octavia.common import constants
+
+
+class AmphoraToAmphoraeWithVRRPIP(task.Task):
+    """A shim class to convert a single Amphora instance to a list."""
+
+    def execute(self, amphora: dict, base_port: dict):
+        # The VRRP_IP has not been stamped on the Amphora at this point in the
+        # flow, so inject it from our port create call in a previous task.
+        amphora[constants.VRRP_IP] = (
+            base_port[constants.FIXED_IPS][0][constants.IP_ADDRESS])
+        return [amphora]
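The shim exists so that the single-amphora data produced by the SR-IOV port tasks can feed tasks that expect an amphorae list, such as the new firewall-rule tasks. A hypothetical wiring into a taskflow flow; the flow itself and the 'amphorae' storage name below are assumptions, not taken from this diff:

    from taskflow.patterns import linear_flow

    from octavia.controller.worker.v2.tasks import shim_tasks

    # Hypothetical example: the task's execute() arguments ('amphora',
    # 'base_port') are pulled from flow storage by name, and its one-element
    # list result is stored under 'amphorae' for downstream consumers.
    flow = linear_flow.Flow('example-sriov-shim-flow')
    flow.add(shim_tasks.AmphoraToAmphoraeWithVRRPIP(provides='amphorae'))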
octavia/db/migration/alembic_migrations/versions/55874a4ceed6_add_l7policy_action_redirect_prefix.py

@@ -30,7 +30,7 @@ down_revision = '76aacf2e176c'
 
 
 def upgrade():
-    # Add collumn redirect_prefix
+    # Add column redirect_prefix
     op.add_column(
         u'l7policy',
         sa.Column(u'redirect_prefix', sa.String(255), nullable=True)
octavia/db/migration/alembic_migrations/versions/5a3ee5472c31_add_cert_expiration__infor_in_amphora_table.py

@@ -11,7 +11,7 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
-"""add cert expiration infor in amphora table
+"""add cert expiration info in amphora table
 
 Revision ID: 5a3ee5472c31
 Revises: 3b199c848b96
octavia/db/migration/alembic_migrations/versions/6742ca1b27c2_add_l7policy_redirect_http_code.py

@@ -29,7 +29,7 @@ down_revision = 'a7f187cd221f'
 
 
 def upgrade():
-    # Add collumn redirect_prefix
+    # Add column redirect_prefix
    op.add_column(
         u'l7policy',
         sa.Column(u'redirect_http_code', sa.Integer(), nullable=True)