octavia 13.0.0.0rc1__py3-none-any.whl → 14.0.0__py3-none-any.whl

This diff compares the contents of two publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
Files changed (135)
  1. octavia/amphorae/backends/agent/api_server/lvs_listener_base.py +1 -1
  2. octavia/amphorae/backends/agent/api_server/osutils.py +5 -5
  3. octavia/amphorae/backends/agent/api_server/plug.py +3 -2
  4. octavia/amphorae/backends/agent/api_server/rules_schema.py +52 -0
  5. octavia/amphorae/backends/agent/api_server/server.py +28 -1
  6. octavia/amphorae/backends/utils/interface.py +45 -6
  7. octavia/amphorae/backends/utils/interface_file.py +9 -6
  8. octavia/amphorae/backends/utils/nftable_utils.py +125 -0
  9. octavia/amphorae/drivers/driver_base.py +27 -0
  10. octavia/amphorae/drivers/haproxy/rest_api_driver.py +42 -10
  11. octavia/amphorae/drivers/health/heartbeat_udp.py +2 -2
  12. octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py +2 -1
  13. octavia/amphorae/drivers/noop_driver/driver.py +25 -0
  14. octavia/api/app.py +3 -0
  15. octavia/api/common/pagination.py +2 -2
  16. octavia/api/drivers/amphora_driver/flavor_schema.py +6 -1
  17. octavia/api/root_controller.py +4 -1
  18. octavia/api/v2/controllers/health_monitor.py +0 -1
  19. octavia/api/v2/controllers/l7policy.py +0 -1
  20. octavia/api/v2/controllers/l7rule.py +0 -1
  21. octavia/api/v2/controllers/listener.py +0 -1
  22. octavia/api/v2/controllers/load_balancer.py +13 -7
  23. octavia/api/v2/controllers/member.py +6 -3
  24. octavia/api/v2/controllers/pool.py +6 -7
  25. octavia/api/v2/types/load_balancer.py +5 -1
  26. octavia/api/v2/types/pool.py +1 -1
  27. octavia/certificates/common/pkcs12.py +9 -9
  28. octavia/certificates/manager/barbican.py +24 -16
  29. octavia/certificates/manager/castellan_mgr.py +12 -7
  30. octavia/certificates/manager/local.py +4 -4
  31. octavia/certificates/manager/noop.py +106 -0
  32. octavia/cmd/driver_agent.py +1 -1
  33. octavia/cmd/health_checker.py +0 -4
  34. octavia/cmd/health_manager.py +1 -5
  35. octavia/cmd/house_keeping.py +1 -1
  36. octavia/cmd/interface.py +0 -4
  37. octavia/cmd/octavia_worker.py +0 -4
  38. octavia/cmd/prometheus_proxy.py +0 -5
  39. octavia/cmd/status.py +0 -6
  40. octavia/common/base_taskflow.py +1 -1
  41. octavia/common/clients.py +15 -3
  42. octavia/common/config.py +24 -6
  43. octavia/common/constants.py +34 -0
  44. octavia/common/data_models.py +3 -1
  45. octavia/common/exceptions.py +11 -0
  46. octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 +7 -5
  47. octavia/common/keystone.py +7 -7
  48. octavia/common/tls_utils/cert_parser.py +24 -10
  49. octavia/common/utils.py +6 -0
  50. octavia/common/validate.py +2 -2
  51. octavia/compute/drivers/nova_driver.py +23 -5
  52. octavia/controller/worker/task_utils.py +28 -6
  53. octavia/controller/worker/v2/controller_worker.py +49 -15
  54. octavia/controller/worker/v2/flows/amphora_flows.py +120 -21
  55. octavia/controller/worker/v2/flows/flow_utils.py +15 -13
  56. octavia/controller/worker/v2/flows/listener_flows.py +95 -5
  57. octavia/controller/worker/v2/flows/load_balancer_flows.py +74 -30
  58. octavia/controller/worker/v2/taskflow_jobboard_driver.py +17 -1
  59. octavia/controller/worker/v2/tasks/amphora_driver_tasks.py +145 -24
  60. octavia/controller/worker/v2/tasks/compute_tasks.py +1 -1
  61. octavia/controller/worker/v2/tasks/database_tasks.py +72 -41
  62. octavia/controller/worker/v2/tasks/lifecycle_tasks.py +97 -41
  63. octavia/controller/worker/v2/tasks/network_tasks.py +57 -60
  64. octavia/controller/worker/v2/tasks/shim_tasks.py +28 -0
  65. octavia/db/migration/alembic_migrations/versions/55874a4ceed6_add_l7policy_action_redirect_prefix.py +1 -1
  66. octavia/db/migration/alembic_migrations/versions/5a3ee5472c31_add_cert_expiration__infor_in_amphora_table.py +1 -1
  67. octavia/db/migration/alembic_migrations/versions/6742ca1b27c2_add_l7policy_redirect_http_code.py +1 -1
  68. octavia/db/migration/alembic_migrations/versions/db2a73e82626_add_vnic_type_for_vip.py +36 -0
  69. octavia/db/models.py +1 -0
  70. octavia/db/prepare.py +1 -1
  71. octavia/db/repositories.py +53 -34
  72. octavia/distributor/drivers/driver_base.py +1 -1
  73. octavia/network/base.py +3 -16
  74. octavia/network/data_models.py +4 -1
  75. octavia/network/drivers/neutron/allowed_address_pairs.py +27 -26
  76. octavia/network/drivers/noop_driver/driver.py +10 -23
  77. octavia/tests/common/sample_certs.py +115 -0
  78. octavia/tests/common/sample_haproxy_prometheus +1 -1
  79. octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py +37 -0
  80. octavia/tests/functional/api/test_healthcheck.py +2 -2
  81. octavia/tests/functional/api/v2/base.py +1 -1
  82. octavia/tests/functional/api/v2/test_listener.py +45 -0
  83. octavia/tests/functional/api/v2/test_load_balancer.py +17 -0
  84. octavia/tests/functional/db/base.py +9 -0
  85. octavia/tests/functional/db/test_models.py +2 -1
  86. octavia/tests/functional/db/test_repositories.py +55 -99
  87. octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py +4 -2
  88. octavia/tests/unit/amphorae/backends/utils/test_interface.py +201 -1
  89. octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py +1 -1
  90. octavia/tests/unit/amphorae/backends/utils/test_nftable_utils.py +194 -0
  91. octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver.py +27 -5
  92. octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py +15 -2
  93. octavia/tests/unit/amphorae/drivers/keepalived/test_vrrp_rest_driver.py +17 -0
  94. octavia/tests/unit/amphorae/drivers/noop_driver/test_driver.py +2 -1
  95. octavia/tests/unit/api/v2/types/test_pool.py +71 -0
  96. octavia/tests/unit/certificates/manager/test_barbican.py +3 -3
  97. octavia/tests/unit/certificates/manager/test_noop.py +53 -0
  98. octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py +16 -17
  99. octavia/tests/unit/common/sample_configs/sample_configs_combined.py +5 -3
  100. octavia/tests/unit/common/test_config.py +35 -0
  101. octavia/tests/unit/common/test_keystone.py +32 -0
  102. octavia/tests/unit/common/test_utils.py +39 -0
  103. octavia/tests/unit/compute/drivers/test_nova_driver.py +22 -0
  104. octavia/tests/unit/controller/worker/test_task_utils.py +58 -2
  105. octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py +28 -5
  106. octavia/tests/unit/controller/worker/v2/flows/test_listener_flows.py +64 -16
  107. octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py +49 -9
  108. octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py +265 -17
  109. octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py +101 -1
  110. octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py +19 -19
  111. octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py +105 -42
  112. octavia/tests/unit/controller/worker/v2/tasks/test_shim_tasks.py +33 -0
  113. octavia/tests/unit/controller/worker/v2/test_controller_worker.py +85 -42
  114. octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py +48 -51
  115. octavia/tests/unit/network/drivers/neutron/test_utils.py +2 -0
  116. octavia/tests/unit/network/drivers/noop_driver/test_driver.py +0 -7
  117. {octavia-13.0.0.0rc1.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/README.rst +6 -1
  118. {octavia-13.0.0.0rc1.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/diskimage-create.sh +10 -4
  119. {octavia-13.0.0.0rc1.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/requirements.txt +0 -2
  120. {octavia-13.0.0.0rc1.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/tox.ini +30 -13
  121. {octavia-13.0.0.0rc1.dist-info → octavia-14.0.0.dist-info}/AUTHORS +5 -0
  122. {octavia-13.0.0.0rc1.dist-info → octavia-14.0.0.dist-info}/METADATA +6 -6
  123. {octavia-13.0.0.0rc1.dist-info → octavia-14.0.0.dist-info}/RECORD +134 -126
  124. {octavia-13.0.0.0rc1.dist-info → octavia-14.0.0.dist-info}/entry_points.txt +1 -1
  125. octavia-14.0.0.dist-info/pbr.json +1 -0
  126. octavia-13.0.0.0rc1.dist-info/pbr.json +0 -1
  127. {octavia-13.0.0.0rc1.data → octavia-14.0.0.data}/data/share/octavia/LICENSE +0 -0
  128. {octavia-13.0.0.0rc1.data → octavia-14.0.0.data}/data/share/octavia/README.rst +0 -0
  129. {octavia-13.0.0.0rc1.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/image-tests.sh +0 -0
  130. {octavia-13.0.0.0rc1.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/test-requirements.txt +0 -0
  131. {octavia-13.0.0.0rc1.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/version.txt +0 -0
  132. {octavia-13.0.0.0rc1.data → octavia-14.0.0.data}/scripts/octavia-wsgi +0 -0
  133. {octavia-13.0.0.0rc1.dist-info → octavia-14.0.0.dist-info}/LICENSE +0 -0
  134. {octavia-13.0.0.0rc1.dist-info → octavia-14.0.0.dist-info}/WHEEL +0 -0
  135. {octavia-13.0.0.0rc1.dist-info → octavia-14.0.0.dist-info}/top_level.txt +0 -0
@@ -13,7 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 #
-
 from oslo_config import cfg
 from oslo_log import log as logging
 from taskflow.patterns import linear_flow
@@ -47,7 +46,8 @@ class LoadBalancerFlows(object):
         self.member_flows = member_flows.MemberFlows()
         self.lb_repo = repo.LoadBalancerRepository()
 
-    def get_create_load_balancer_flow(self, topology, listeners=None):
+    def get_create_load_balancer_flow(self, topology, listeners=None,
+                                      flavor_dict=None):
         """Creates a conditional graph flow that allocates a loadbalancer.
 
         :raises InvalidTopology: Invalid topology specified
@@ -59,7 +59,7 @@ class LoadBalancerFlows(object):
         lb_create_flow.add(lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask(
             requires=constants.LOADBALANCER_ID))
 
-        # allocate VIP
+        # allocate VIP - Saves the VIP IP(s) in neutron
         lb_create_flow.add(database_tasks.ReloadLoadBalancer(
             name=constants.RELOAD_LB_BEFOR_ALLOCATE_VIP,
             requires=constants.LOADBALANCER_ID,
@@ -81,9 +81,11 @@
             provides=constants.SUBNET))
 
         if topology == constants.TOPOLOGY_ACTIVE_STANDBY:
-            lb_create_flow.add(*self._create_active_standby_topology())
+            lb_create_flow.add(*self._create_active_standby_topology(
+                flavor_dict=flavor_dict))
         elif topology == constants.TOPOLOGY_SINGLE:
-            lb_create_flow.add(*self._create_single_topology())
+            lb_create_flow.add(*self._create_single_topology(
+                flavor_dict=flavor_dict))
         else:
             LOG.error("Unknown topology: %s. Unable to build load balancer.",
                       topology)
@@ -91,10 +93,12 @@
 
         post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW
         lb_create_flow.add(
-            self.get_post_lb_amp_association_flow(post_amp_prefix, topology))
+            self.get_post_lb_amp_association_flow(post_amp_prefix, topology,
+                                                  flavor_dict=flavor_dict))
 
         if listeners:
-            lb_create_flow.add(*self._create_listeners_flow())
+            lb_create_flow.add(
+                *self._create_listeners_flow(flavor_dict=flavor_dict))
 
         lb_create_flow.add(
             database_tasks.MarkLBActiveInDB(
@@ -112,7 +116,7 @@
 
         return lb_create_flow
 
-    def _create_single_topology(self):
+    def _create_single_topology(self, flavor_dict=None):
         sf_name = (constants.ROLE_STANDALONE + '-' +
                    constants.AMP_PLUG_NET_SUBFLOW)
         amp_for_lb_net_flow = linear_flow.Flow(sf_name)
@@ -120,11 +124,13 @@
             prefix=constants.ROLE_STANDALONE,
             role=constants.ROLE_STANDALONE)
         amp_for_lb_net_flow.add(amp_for_lb_flow)
-        amp_for_lb_net_flow.add(*self._get_amp_net_subflow(sf_name))
+        amp_for_lb_net_flow.add(*self._get_amp_net_subflow(
+            sf_name, flavor_dict=flavor_dict))
         return amp_for_lb_net_flow
 
     def _create_active_standby_topology(
-            self, lf_name=constants.CREATE_LOADBALANCER_FLOW):
+            self, lf_name=constants.CREATE_LOADBALANCER_FLOW,
+            flavor_dict=None):
         # When we boot up amphora for an active/standby topology,
         # we should leverage the Nova anti-affinity capabilities
         # to place the amphora on different hosts, also we need to check
@@ -156,26 +162,45 @@
         master_amp_sf = linear_flow.Flow(master_sf_name)
         master_amp_sf.add(self.amp_flows.get_amphora_for_lb_subflow(
             prefix=constants.ROLE_MASTER, role=constants.ROLE_MASTER))
-        master_amp_sf.add(*self._get_amp_net_subflow(master_sf_name))
+        master_amp_sf.add(*self._get_amp_net_subflow(master_sf_name,
+                                                     flavor_dict=flavor_dict))
 
         backup_sf_name = (constants.ROLE_BACKUP + '-' +
                           constants.AMP_PLUG_NET_SUBFLOW)
         backup_amp_sf = linear_flow.Flow(backup_sf_name)
         backup_amp_sf.add(self.amp_flows.get_amphora_for_lb_subflow(
             prefix=constants.ROLE_BACKUP, role=constants.ROLE_BACKUP))
-        backup_amp_sf.add(*self._get_amp_net_subflow(backup_sf_name))
+        backup_amp_sf.add(*self._get_amp_net_subflow(backup_sf_name,
+                                                     flavor_dict=flavor_dict))
 
         amps_flow.add(master_amp_sf, backup_amp_sf)
 
         return flows + [amps_flow]
 
-    def _get_amp_net_subflow(self, sf_name):
+    def _get_amp_net_subflow(self, sf_name, flavor_dict=None):
         flows = []
-        flows.append(network_tasks.PlugVIPAmphora(
-            name=sf_name + '-' + constants.PLUG_VIP_AMPHORA,
-            requires=(constants.LOADBALANCER, constants.AMPHORA,
-                      constants.SUBNET),
-            provides=constants.AMP_DATA))
+        # If we have an SRIOV VIP, we need to setup a firewall in the amp
+        if flavor_dict and flavor_dict.get(constants.SRIOV_VIP, False):
+            flows.append(network_tasks.CreateSRIOVBasePort(
+                name=sf_name + '-' + constants.PLUG_VIP_AMPHORA,
+                requires=(constants.LOADBALANCER, constants.AMPHORA,
+                          constants.SUBNET),
+                provides=constants.PORT_DATA))
+            flows.append(compute_tasks.AttachPort(
+                name=sf_name + '-' + constants.ATTACH_PORT,
+                requires=(constants.AMPHORA),
+                rebind={constants.PORT: constants.PORT_DATA}))
+            flows.append(network_tasks.BuildAMPData(
+                name=sf_name + '-' + constants.BUILD_AMP_DATA,
+                requires=(constants.LOADBALANCER, constants.AMPHORA,
+                          constants.PORT_DATA),
+                provides=constants.AMP_DATA))
+        else:
+            flows.append(network_tasks.PlugVIPAmphora(
+                name=sf_name + '-' + constants.PLUG_VIP_AMPHORA,
+                requires=(constants.LOADBALANCER, constants.AMPHORA,
+                          constants.SUBNET),
+                provides=constants.AMP_DATA))
 
         flows.append(network_tasks.ApplyQosAmphora(
             name=sf_name + '-' + constants.APPLY_QOS_AMP,
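
The rebuilt `_get_amp_net_subflow` now branches on a flavor capability rather than always plugging the VIP through allowed-address-pairs. A minimal sketch of that dispatch, assuming `constants.SRIOV_VIP` resolves to the flavor key `'sriov_vip'` and using the task class names from the hunk above as plain strings:

```python
# Sketch only: mirrors the flavor-driven branch in _get_amp_net_subflow.
# Assumption: constants.SRIOV_VIP == 'sriov_vip'.
def vip_plug_tasks(flavor_dict):
    if flavor_dict and flavor_dict.get('sriov_vip', False):
        # SRIOV path: pre-create the base port, hot-plug it through the
        # compute driver, then assemble the same AMP_DATA the legacy
        # PlugVIPAmphora task used to provide.
        return ['CreateSRIOVBasePort', 'AttachPort', 'BuildAMPData']
    # Default path: a single allowed-address-pairs plug task.
    return ['PlugVIPAmphora']

assert vip_plug_tasks({'sriov_vip': True}) == [
    'CreateSRIOVBasePort', 'AttachPort', 'BuildAMPData']
assert vip_plug_tasks(None) == ['PlugVIPAmphora']
```
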
@@ -196,7 +221,7 @@
                       constants.AMPHORAE_NETWORK_CONFIG)))
         return flows
 
-    def _create_listeners_flow(self):
+    def _create_listeners_flow(self, flavor_dict=None):
         flows = []
         flows.append(
             database_tasks.ReloadLoadBalancer(
@@ -229,11 +254,13 @@
             )
         )
         flows.append(
-            self.listener_flows.get_create_all_listeners_flow()
+            self.listener_flows.get_create_all_listeners_flow(
+                flavor_dict=flavor_dict)
         )
         return flows
 
-    def get_post_lb_amp_association_flow(self, prefix, topology):
+    def get_post_lb_amp_association_flow(self, prefix, topology,
+                                         flavor_dict=None):
         """Reload the loadbalancer and create networking subflows for
 
         created/allocated amphorae.
@@ -251,14 +278,15 @@
         post_create_LB_flow.add(database_tasks.GetAmphoraeFromLoadbalancer(
             requires=constants.LOADBALANCER_ID,
             provides=constants.AMPHORAE))
-        vrrp_subflow = self.amp_flows.get_vrrp_subflow(prefix)
+        vrrp_subflow = self.amp_flows.get_vrrp_subflow(
+            prefix, flavor_dict=flavor_dict)
         post_create_LB_flow.add(vrrp_subflow)
 
         post_create_LB_flow.add(database_tasks.UpdateLoadbalancerInDB(
             requires=[constants.LOADBALANCER, constants.UPDATE_DICT]))
         return post_create_LB_flow
 
-    def _get_delete_listeners_flow(self, listeners):
+    def _get_delete_listeners_flow(self, listeners, flavor_dict=None):
         """Sets up an internal delete flow
 
         :param listeners: A list of listener dicts
@@ -268,7 +296,7 @@
         for listener in listeners:
             listeners_delete_flow.add(
                 self.listener_flows.get_delete_listener_internal_flow(
-                    listener))
+                    listener, flavor_dict=flavor_dict))
         return listeners_delete_flow
 
     def get_delete_load_balancer_flow(self, lb):
@@ -466,12 +494,13 @@
                         role=new_amp_role,
                         failed_amp_vrrp_port_id=failed_amp.get(
                             constants.VRRP_PORT_ID),
-                        is_vrrp_ipv6=failed_vrrp_is_ipv6))
+                        is_vrrp_ipv6=failed_vrrp_is_ipv6,
+                        flavor_dict=lb[constants.FLAVOR]))
             else:
                 failover_LB_flow.add(
                     self.amp_flows.get_amphora_for_lb_failover_subflow(
                         prefix=constants.FAILOVER_LOADBALANCER_FLOW,
-                        role=new_amp_role))
+                        role=new_amp_role, flavor_dict=lb[constants.FLAVOR]))
 
         if lb_topology == constants.TOPOLOGY_ACTIVE_STANDBY:
             failover_LB_flow.add(database_tasks.MarkAmphoraBackupInDB(
@@ -593,7 +622,8 @@
                 self.amp_flows.get_amphora_for_lb_failover_subflow(
                     prefix=(new_amp_role + '-' +
                             constants.FAILOVER_LOADBALANCER_FLOW),
-                    role=new_amp_role))
+                    role=new_amp_role,
+                    flavor_dict=lb[constants.FLAVOR]))
 
             failover_LB_flow.add(database_tasks.MarkAmphoraMasterInDB(
                 name=constants.MARK_AMP_MASTER_INDB,
@@ -637,6 +667,14 @@
                 requires=constants.LOADBALANCER_ID,
                 provides=constants.AMPHORAE))
 
+            failover_LB_flow.add(
+                amphora_driver_tasks.AmphoraeGetConnectivityStatus(
+                    name=(new_amp_role + '-' +
+                          constants.AMPHORAE_GET_CONNECTIVITY_STATUS),
+                    requires=constants.AMPHORAE,
+                    rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
+                    provides=constants.AMPHORAE_STATUS))
+
             # Listeners update needs to be run on all amphora to update
             # their peer configurations. So parallelize this with an
             # unordered subflow.
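
The `rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID}` arguments above feed the flow-storage value stored under `amphora_id` into each task's `new_amphora_id` parameter. A minimal taskflow sketch of that mechanism (taskflow is a runtime dependency of octavia; the task and store names here are illustrative):

```python
import taskflow.engines
from taskflow import task
from taskflow.patterns import linear_flow


class Echo(task.Task):
    def execute(self, new_amphora_id):
        # Receives the value stored under 'amphora_id' in flow storage.
        print('new_amphora_id =', new_amphora_id)


flow = linear_flow.Flow('rebind-demo')
flow.add(Echo(rebind={'new_amphora_id': 'amphora_id'}))
taskflow.engines.run(flow, store={'amphora_id': 'amp-1234'})
```
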
@@ -651,14 +689,18 @@
                 amphora_driver_tasks.AmphoraIndexListenerUpdate(
                     name=(constants.AMPHORA + '-0-' +
                           constants.AMP_LISTENER_UPDATE),
-                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
+                    requires=(constants.LOADBALANCER, constants.AMPHORAE,
+                              constants.AMPHORAE_STATUS),
+                    rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
                     inject={constants.AMPHORA_INDEX: 0,
                             constants.TIMEOUT_DICT: timeout_dict}))
             update_amps_subflow.add(
                 amphora_driver_tasks.AmphoraIndexListenerUpdate(
                     name=(constants.AMPHORA + '-1-' +
                           constants.AMP_LISTENER_UPDATE),
-                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
+                    requires=(constants.LOADBALANCER, constants.AMPHORAE,
+                              constants.AMPHORAE_STATUS),
+                    rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
                     inject={constants.AMPHORA_INDEX: 1,
                             constants.TIMEOUT_DICT: timeout_dict}))
 
@@ -667,7 +709,8 @@
             # Configure and enable keepalived in the amphora
             failover_LB_flow.add(self.amp_flows.get_vrrp_subflow(
                 new_amp_role + '-' + constants.GET_VRRP_SUBFLOW,
-                timeout_dict, create_vrrp_group=False))
+                timeout_dict, create_vrrp_group=False,
+                get_amphorae_status=False, flavor_dict=lb[constants.FLAVOR]))
 
             # #### End of standby ####
 
@@ -682,6 +725,7 @@
                     name=(new_amp_role + '-' +
                           constants.AMPHORA_RELOAD_LISTENER),
                     requires=(constants.LOADBALANCER, constants.AMPHORAE),
+                    rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
                     inject={constants.AMPHORA_INDEX: 1,
                             constants.TIMEOUT_DICT: timeout_dict}))
 
@@ -41,6 +41,7 @@ class MysqlPersistenceDriver(object):
             'max_pool_size': CONF.database.max_pool_size,
             'max_overflow': CONF.database.max_overflow,
             'pool_timeout': CONF.database.pool_timeout,
+            'idle_timeout': CONF.database.connection_recycle_time
         }
 
     def initialize(self):
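
This maps oslo.db's `[database] connection_recycle_time` onto the `idle_timeout` knob of the taskflow SQLAlchemy persistence backend, so idle jobboard-persistence connections are recycled on the same schedule as the rest of the controller's database pool. A sketch of the resulting settings dict, assuming stock oslo.db defaults:

```python
# Illustrative values only: these are the oslo.db option defaults.
engine_settings = {
    'max_pool_size': 5,     # [database] max_pool_size
    'max_overflow': 50,     # [database] max_overflow
    'pool_timeout': None,   # [database] pool_timeout
    'idle_timeout': 3600,   # new: [database] connection_recycle_time
}
```
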
@@ -87,14 +88,29 @@ class RedisTaskFlowDriver(JobboardTaskFlowDriver):
         self.persistence_driver = persistence_driver
 
     def job_board(self, persistence):
+
+        def _format_server(host, port):
+            if ':' in host:
+                return '[%s]:%d' % (host, port)
+            return '%s:%d' % (host, port)
+
         jobboard_backend_conf = {
             'board': 'redis',
             'host': CONF.task_flow.jobboard_backend_hosts[0],
             'port': CONF.task_flow.jobboard_backend_port,
-            'password': CONF.task_flow.jobboard_backend_password,
             'namespace': CONF.task_flow.jobboard_backend_namespace,
             'sentinel': CONF.task_flow.jobboard_redis_sentinel,
+            'sentinel_fallbacks': [
+                _format_server(host, CONF.task_flow.jobboard_backend_port)
+                for host in CONF.task_flow.jobboard_backend_hosts[1:]
+            ]
         }
+        if CONF.task_flow.jobboard_backend_username is not None:
+            jobboard_backend_conf['username'] = (
+                CONF.task_flow.jobboard_backend_username)
+        if CONF.task_flow.jobboard_backend_password is not None:
+            jobboard_backend_conf['password'] = (
+                CONF.task_flow.jobboard_backend_password)
         jobboard_backend_conf.update(
             CONF.task_flow.jobboard_redis_backend_ssl_options)
         return job_backends.backend(
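
The new `sentinel_fallbacks` entries are `host:port` strings built from every configured jobboard host after the first, and the nested helper brackets IPv6 literals so they parse unambiguously. A standalone re-implementation to show the expected formatting (ports illustrative):

```python
def _format_server(host, port):
    # IPv6 literals contain ':', so wrap them in brackets.
    if ':' in host:
        return '[%s]:%d' % (host, port)
    return '%s:%d' % (host, port)

assert _format_server('192.0.2.10', 26379) == '192.0.2.10:26379'
assert _format_server('2001:db8::10', 26379) == '[2001:db8::10]:26379'
```
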
@@ -14,6 +14,9 @@
 #
 
 import copy
+from typing import List
+from typing import Optional
+
 from cryptography import fernet
 from oslo_config import cfg
 from oslo_log import log as logging
@@ -58,7 +61,7 @@ class AmpRetry(retry.Times):
         max_retry_attempt = CONF.haproxy_amphora.connection_max_retries
         for task_name, ex_info in last_errors.items():
             if len(history) <= max_retry_attempt:
-                # When taskflow persistance is enabled and flow/task state is
+                # When taskflow persistence is enabled and flow/task state is
                 # saved in the backend. If flow(task) is restored(restart of
                 # worker,etc) we are getting ex_info as None - we need to RETRY
                 # task to check its real state.
@@ -102,10 +105,19 @@ class AmpListenersUpdate(BaseAmphoraTask):
 class AmphoraIndexListenerUpdate(BaseAmphoraTask):
     """Task to update the listeners on one amphora."""
 
-    def execute(self, loadbalancer, amphora_index, amphorae, timeout_dict=()):
+    def execute(self, loadbalancer, amphora_index, amphorae,
+                amphorae_status: dict, new_amphora_id: str, timeout_dict=()):
         # Note, we don't want this to cause a revert as it may be used
         # in a failover flow with both amps failing. Skip it and let
         # health manager fix it.
+
+        amphora_id = amphorae[amphora_index].get(constants.ID)
+        amphora_status = amphorae_status.get(amphora_id, {})
+        if amphora_status.get(constants.UNREACHABLE):
+            LOG.warning("Skipping listener update because amphora %s "
+                        "is not reachable.", amphora_id)
+            return
+
         try:
             # TODO(johnsom) Optimize this to use the dicts and not need the
             # DB lookups
@@ -120,14 +132,16 @@ class AmphoraIndexListenerUpdate(BaseAmphoraTask):
             self.amphora_driver.update_amphora_listeners(
                 db_lb, db_amp, timeout_dict)
         except Exception as e:
-            amphora_id = amphorae[amphora_index].get(constants.ID)
             LOG.error('Failed to update listeners on amphora %s. Skipping '
                       'this amphora as it is failing to update due to: %s',
                       amphora_id, str(e))
-            session = db_apis.get_session()
-            with session.begin():
-                self.amphora_repo.update(session, amphora_id,
-                                         status=constants.ERROR)
+            # Update only the status of the newly created amphora during the
+            # failover
+            if amphora_id == new_amphora_id:
+                session = db_apis.get_session()
+                with session.begin():
+                    self.amphora_repo.update(session, amphora_id,
+                                             status=constants.ERROR)
 
 
 class ListenersUpdate(BaseAmphoraTask):
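
The listener, reload, and VRRP tasks in this module now share the same two guards: skip an amphora that `AmphoraeGetConnectivityStatus` reported unreachable, and on failure mark only the newly built amphora ERROR, leaving a surviving peer for the health manager. A condensed sketch of the pattern, assuming `constants.UNREACHABLE == 'unreachable'`:

```python
def guarded_update(amphora_id, new_amphora_id, amphorae_status, do_update):
    # Guard 1: an unreachable amphora is skipped, never reverted.
    if amphorae_status.get(amphora_id, {}).get('unreachable'):
        return 'skipped'
    try:
        do_update()
    except Exception:
        # Guard 2: only the freshly created amphora is marked ERROR.
        if amphora_id == new_amphora_id:
            return 'marked-error'
        return 'left-to-health-manager'
    return 'updated'
```
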
@@ -193,10 +207,18 @@ class AmphoraIndexListenersReload(BaseAmphoraTask):
     """Task to reload all listeners on an amphora."""
 
     def execute(self, loadbalancer, amphora_index, amphorae,
-                timeout_dict=None):
+                amphorae_status: dict, new_amphora_id: str, timeout_dict=None):
         """Execute listener reload routines for listeners on an amphora."""
         if amphorae is None:
             return
+
+        amphora_id = amphorae[amphora_index].get(constants.ID)
+        amphora_status = amphorae_status.get(amphora_id, {})
+        if amphora_status.get(constants.UNREACHABLE):
+            LOG.warning("Skipping listener reload because amphora %s "
+                        "is not reachable.", amphora_id)
+            return
+
         # TODO(johnsom) Optimize this to use the dicts and not need the
         # DB lookups
         session = db_apis.get_session()
@@ -210,13 +232,15 @@
             try:
                 self.amphora_driver.reload(db_lb, db_amp, timeout_dict)
             except Exception as e:
-                amphora_id = amphorae[amphora_index][constants.ID]
                 LOG.warning('Failed to reload listeners on amphora %s. '
                             'Skipping this amphora as it is failing to '
                             'reload due to: %s', amphora_id, str(e))
-                with session.begin():
-                    self.amphora_repo.update(session, amphora_id,
-                                             status=constants.ERROR)
+                # Update only the status of the newly created amphora during
+                # the failover
+                if amphora_id == new_amphora_id:
+                    with session.begin():
+                        self.amphora_repo.update(session, amphora_id,
+                                                 status=constants.ERROR)
 
 
 class ListenerDelete(BaseAmphoraTask):
@@ -478,8 +502,15 @@ class AmphoraUpdateVRRPInterface(BaseAmphoraTask):
 class AmphoraIndexUpdateVRRPInterface(BaseAmphoraTask):
     """Task to get and update the VRRP interface device name from amphora."""
 
-    def execute(self, amphora_index, amphorae, timeout_dict=None):
+    def execute(self, amphora_index, amphorae, amphorae_status: dict,
+                new_amphora_id: str, timeout_dict=None):
         amphora_id = amphorae[amphora_index][constants.ID]
+        amphora_status = amphorae_status.get(amphora_id, {})
+        if amphora_status.get(constants.UNREACHABLE):
+            LOG.warning("Skipping VRRP interface update because amphora %s "
+                        "is not reachable.", amphora_id)
+            return None
+
         try:
             # TODO(johnsom) Optimize this to use the dicts and not need the
             # DB lookups
@@ -494,9 +525,12 @@
             LOG.error('Failed to get amphora VRRP interface on amphora '
                       '%s. Skipping this amphora as it is failing due to: '
                       '%s', amphora_id, str(e))
-            with session.begin():
-                self.amphora_repo.update(session, amphora_id,
-                                         status=constants.ERROR)
+            # Update only the status of the newly created amphora during the
+            # failover
+            if amphora_id == new_amphora_id:
+                with session.begin():
+                    self.amphora_repo.update(session, amphora_id,
+                                             status=constants.ERROR)
             return None
 
         with session.begin():
@@ -542,12 +576,19 @@ class AmphoraIndexVRRPUpdate(BaseAmphoraTask):
     """Task to update the VRRP configuration of an amphora."""
 
     def execute(self, loadbalancer_id, amphorae_network_config, amphora_index,
-                amphorae, amp_vrrp_int, timeout_dict=None):
+                amphorae, amphorae_status: dict, amp_vrrp_int: Optional[str],
+                new_amphora_id: str, timeout_dict=None):
         """Execute update_vrrp_conf."""
         # Note, we don't want this to cause a revert as it may be used
         # in a failover flow with both amps failing. Skip it and let
         # health manager fix it.
         amphora_id = amphorae[amphora_index][constants.ID]
+        amphora_status = amphorae_status.get(amphora_id, {})
+        if amphora_status.get(constants.UNREACHABLE):
+            LOG.warning("Skipping VRRP configuration because amphora %s "
+                        "is not reachable.", amphora_id)
+            return
+
         try:
             # TODO(johnsom) Optimize this to use the dicts and not need the
             # DB lookups
@@ -564,9 +605,12 @@
             LOG.error('Failed to update VRRP configuration amphora %s. '
                       'Skipping this amphora as it is failing to update due '
                       'to: %s', amphora_id, str(e))
-            with session.begin():
-                self.amphora_repo.update(session, amphora_id,
-                                         status=constants.ERROR)
+            # Update only the status of the newly created amphora during the
+            # failover
+            if amphora_id == new_amphora_id:
+                with session.begin():
+                    self.amphora_repo.update(session, amphora_id,
+                                             status=constants.ERROR)
             return
         LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora_id)
 
@@ -594,10 +638,17 @@ class AmphoraIndexVRRPStart(BaseAmphoraTask):
     This will reload keepalived if it is already running.
     """
 
-    def execute(self, amphora_index, amphorae, timeout_dict=None):
+    def execute(self, amphora_index, amphorae, amphorae_status: dict,
+                new_amphora_id: str, timeout_dict=None):
         # TODO(johnsom) Optimize this to use the dicts and not need the
         # DB lookups
         amphora_id = amphorae[amphora_index][constants.ID]
+        amphora_status = amphorae_status.get(amphora_id, {})
+        if amphora_status.get(constants.UNREACHABLE):
+            LOG.warning("Skipping VRRP start because amphora %s "
+                        "is not reachable.", amphora_id)
+            return
+
         session = db_apis.get_session()
         with session.begin():
             db_amp = self.amphora_repo.get(session, id=amphora_id)
@@ -607,9 +658,12 @@
             LOG.error('Failed to start VRRP on amphora %s. '
                       'Skipping this amphora as it is failing to start due '
                       'to: %s', amphora_id, str(e))
-            with session.begin():
-                self.amphora_repo.update(session, amphora_id,
-                                         status=constants.ERROR)
+            # Update only the status of the newly created amphora during the
+            # failover
+            if amphora_id == new_amphora_id:
+                with session.begin():
+                    self.amphora_repo.update(session, amphora_id,
+                                             status=constants.ERROR)
             return
         LOG.debug("Started VRRP on amphora %s.",
                   amphorae[amphora_index][constants.ID])
@@ -669,3 +723,70 @@
                         'update. Please update the amphora image for this '
                         'amphora. Skipping.'.
                         format(amphora.get(constants.ID)))
+
+
+class AmphoraeGetConnectivityStatus(BaseAmphoraTask):
+    """Task that checks amphorae connectivity status.
+
+    Check and return the connectivity status of both amphorae in ACTIVE STANDBY
+    load balancers
+    """
+
+    def execute(self, amphorae: List[dict], new_amphora_id: str,
+                timeout_dict=None):
+        amphorae_status = {}
+
+        for amphora in amphorae:
+            amphora_id = amphora[constants.ID]
+            amphorae_status[amphora_id] = {}
+
+            session = db_apis.get_session()
+            with session.begin():
+                db_amp = self.amphora_repo.get(session, id=amphora_id)
+
+            try:
+                # Verify if the amphora is reachable
+                self.amphora_driver.check(db_amp, timeout_dict=timeout_dict)
+            except Exception as e:
+                LOG.exception("Cannot get status for amphora %s",
+                              amphora_id)
+                # In case it fails and the tested amphora is the newly created
+                # amphora, it's not a normal error handling, re-raise the
+                # exception
+                if amphora_id == new_amphora_id:
+                    raise e
+                amphorae_status[amphora_id][constants.UNREACHABLE] = True
+            else:
+                amphorae_status[amphora_id][constants.UNREACHABLE] = False
+
+        return amphorae_status
+
+
+class SetAmphoraFirewallRules(BaseAmphoraTask):
+    """Task to push updated firewall ruls to an amphora."""
+
+    def execute(self, amphorae: List[dict], amphora_index: int,
+                amphora_firewall_rules: List[dict], amphorae_status: dict,
+                timeout_dict=None):
+
+        if (amphora_firewall_rules and
+                amphora_firewall_rules[0].get('non-sriov-vip', False)):
+            # Not an SRIOV VIP, so skip setting firewall rules.
+            # This is already logged in GetAmphoraFirewallRules.
+            return
+
+        amphora_id = amphorae[amphora_index][constants.ID]
+        amphora_status = amphorae_status.get(amphora_id, {})
+        if amphora_status.get(constants.UNREACHABLE):
+            LOG.warning("Skipping firewall rules update because amphora %s "
+                        "is not reachable.", amphora_id)
+            return
+
+        session = db_apis.get_session()
+        with session.begin():
+            db_amp = self.amphora_repo.get(session, id=amphora_id)
+
+        self.amphora_driver.set_interface_rules(
+            db_amp,
+            amphorae[amphora_index][constants.VRRP_IP],
+            amphora_firewall_rules, timeout_dict=timeout_dict)
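
`AmphoraeGetConnectivityStatus` produces the `amphorae_status` dict the tasks above consume: a mapping from amphora ID to a reachability flag, where a failing health check on the new amphora re-raises instead of being recorded. An illustrative return value, again assuming `constants.UNREACHABLE == 'unreachable'` (IDs hypothetical):

```python
amphorae_status = {
    'amp-new-0001': {'unreachable': False},  # new amphora: check() succeeded
    'amp-old-0002': {'unreachable': True},   # old peer: check() raised
}
```
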
@@ -59,7 +59,7 @@ class ComputeRetry(retry.Times):
         max_retry_attempt = CONF.controller_worker.amp_active_retries
         for task_name, ex_info in last_errors.items():
             if len(history) <= max_retry_attempt:
-                # When taskflow persistance is enabled and flow/task state is
+                # When taskflow persistence is enabled and flow/task state is
                 # saved in the backend. If flow(task) is restored(restart of
                 # worker,etc) we are getting ex_info as None - we need to RETRY
                 # task to check its real state.