octavia 13.0.0__py3-none-any.whl → 13.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- octavia/amphorae/backends/agent/api_server/keepalivedlvs.py +9 -0
- octavia/amphorae/backends/agent/api_server/osutils.py +1 -2
- octavia/amphorae/backends/agent/api_server/util.py +35 -2
- octavia/amphorae/backends/utils/interface.py +4 -5
- octavia/amphorae/drivers/driver_base.py +16 -0
- octavia/amphorae/drivers/haproxy/rest_api_driver.py +13 -8
- octavia/amphorae/drivers/keepalived/jinja/jinja_cfg.py +0 -1
- octavia/amphorae/drivers/keepalived/jinja/templates/keepalived_base.template +0 -1
- octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py +2 -1
- octavia/amphorae/drivers/noop_driver/driver.py +3 -0
- octavia/api/common/pagination.py +1 -1
- octavia/api/v2/controllers/health_monitor.py +3 -2
- octavia/api/v2/controllers/l7policy.py +0 -1
- octavia/api/v2/controllers/l7rule.py +0 -1
- octavia/api/v2/controllers/listener.py +0 -1
- octavia/api/v2/controllers/load_balancer.py +13 -7
- octavia/api/v2/controllers/member.py +18 -5
- octavia/api/v2/controllers/pool.py +6 -7
- octavia/api/v2/types/pool.py +1 -1
- octavia/certificates/common/pkcs12.py +9 -9
- octavia/certificates/manager/barbican.py +24 -16
- octavia/certificates/manager/castellan_mgr.py +12 -7
- octavia/certificates/manager/noop.py +106 -0
- octavia/common/clients.py +22 -4
- octavia/common/config.py +21 -5
- octavia/common/constants.py +4 -0
- octavia/common/exceptions.py +6 -0
- octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 +7 -5
- octavia/common/keystone.py +7 -7
- octavia/common/tls_utils/cert_parser.py +23 -9
- octavia/controller/worker/task_utils.py +28 -6
- octavia/controller/worker/v2/controller_worker.py +2 -2
- octavia/controller/worker/v2/flows/amphora_flows.py +41 -10
- octavia/controller/worker/v2/flows/flow_utils.py +6 -4
- octavia/controller/worker/v2/flows/load_balancer_flows.py +17 -3
- octavia/controller/worker/v2/tasks/amphora_driver_tasks.py +114 -23
- octavia/controller/worker/v2/tasks/database_tasks.py +36 -47
- octavia/controller/worker/v2/tasks/lifecycle_tasks.py +96 -40
- octavia/controller/worker/v2/tasks/network_tasks.py +12 -13
- octavia/db/base_models.py +16 -4
- octavia/db/repositories.py +34 -33
- octavia/network/drivers/neutron/allowed_address_pairs.py +10 -8
- octavia/network/drivers/noop_driver/driver.py +1 -2
- octavia/tests/common/sample_certs.py +115 -0
- octavia/tests/functional/api/v2/base.py +1 -1
- octavia/tests/functional/api/v2/test_health_monitor.py +18 -0
- octavia/tests/functional/api/v2/test_listener.py +45 -0
- octavia/tests/functional/api/v2/test_member.py +32 -0
- octavia/tests/functional/db/base.py +9 -0
- octavia/tests/functional/db/test_repositories.py +45 -98
- octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py +89 -1
- octavia/tests/unit/amphorae/backends/utils/test_interface.py +3 -1
- octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver.py +3 -3
- octavia/tests/unit/amphorae/drivers/keepalived/jinja/test_jinja_cfg.py +0 -4
- octavia/tests/unit/amphorae/drivers/keepalived/test_vrrp_rest_driver.py +17 -0
- octavia/tests/unit/api/common/test_pagination.py +78 -1
- octavia/tests/unit/api/v2/types/test_pool.py +71 -0
- octavia/tests/unit/certificates/manager/test_barbican.py +3 -3
- octavia/tests/unit/certificates/manager/test_noop.py +53 -0
- octavia/tests/unit/cmd/test_prometheus_proxy.py +8 -1
- octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py +16 -17
- octavia/tests/unit/common/test_config.py +35 -0
- octavia/tests/unit/common/test_keystone.py +32 -0
- octavia/tests/unit/controller/worker/test_task_utils.py +58 -2
- octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py +28 -5
- octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py +10 -5
- octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py +234 -17
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py +28 -6
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py +19 -19
- octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py +57 -2
- octavia/tests/unit/controller/worker/v2/test_controller_worker.py +56 -1
- octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py +24 -1
- {octavia-13.0.0.dist-info → octavia-13.0.1.dist-info}/AUTHORS +8 -0
- octavia-13.0.1.dist-info/METADATA +155 -0
- {octavia-13.0.0.dist-info → octavia-13.0.1.dist-info}/RECORD +90 -88
- {octavia-13.0.0.dist-info → octavia-13.0.1.dist-info}/WHEEL +1 -1
- {octavia-13.0.0.dist-info → octavia-13.0.1.dist-info}/entry_points.txt +1 -1
- octavia-13.0.1.dist-info/pbr.json +1 -0
- octavia-13.0.0.dist-info/METADATA +0 -158
- octavia-13.0.0.dist-info/pbr.json +0 -1
- {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/LICENSE +0 -0
- {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/README.rst +0 -0
- {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/README.rst +0 -0
- {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/diskimage-create.sh +0 -0
- {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/image-tests.sh +0 -0
- {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/requirements.txt +0 -0
- {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/test-requirements.txt +0 -0
- {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/tox.ini +0 -0
- {octavia-13.0.0.data → octavia-13.0.1.data}/data/share/octavia/diskimage-create/version.txt +0 -0
- {octavia-13.0.0.data → octavia-13.0.1.data}/scripts/octavia-wsgi +0 -0
- {octavia-13.0.0.dist-info → octavia-13.0.1.dist-info}/LICENSE +0 -0
- {octavia-13.0.0.dist-info → octavia-13.0.1.dist-info}/top_level.txt +0 -0
octavia/controller/worker/v2/flows/load_balancer_flows.py

@@ -637,6 +637,14 @@ class LoadBalancerFlows(object):
                 requires=constants.LOADBALANCER_ID,
                 provides=constants.AMPHORAE))
 
+            failover_LB_flow.add(
+                amphora_driver_tasks.AmphoraeGetConnectivityStatus(
+                    name=(new_amp_role + '-' +
+                          constants.AMPHORAE_GET_CONNECTIVITY_STATUS),
+                    requires=constants.AMPHORAE,
+                    rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
+                    provides=constants.AMPHORAE_STATUS))
+
             # Listeners update needs to be run on all amphora to update
             # their peer configurations. So parallelize this with an
             # unordered subflow.
@@ -651,14 +659,18 @@ class LoadBalancerFlows(object):
                 amphora_driver_tasks.AmphoraIndexListenerUpdate(
                     name=(constants.AMPHORA + '-0-' +
                           constants.AMP_LISTENER_UPDATE),
-                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
+                    requires=(constants.LOADBALANCER, constants.AMPHORAE,
+                              constants.AMPHORAE_STATUS),
+                    rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
                     inject={constants.AMPHORA_INDEX: 0,
                             constants.TIMEOUT_DICT: timeout_dict}))
             update_amps_subflow.add(
                 amphora_driver_tasks.AmphoraIndexListenerUpdate(
                     name=(constants.AMPHORA + '-1-' +
                           constants.AMP_LISTENER_UPDATE),
-                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
+                    requires=(constants.LOADBALANCER, constants.AMPHORAE,
+                              constants.AMPHORAE_STATUS),
+                    rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
                     inject={constants.AMPHORA_INDEX: 1,
                             constants.TIMEOUT_DICT: timeout_dict}))
 
@@ -667,7 +679,8 @@ class LoadBalancerFlows(object):
             # Configure and enable keepalived in the amphora
             failover_LB_flow.add(self.amp_flows.get_vrrp_subflow(
                 new_amp_role + '-' + constants.GET_VRRP_SUBFLOW,
-                timeout_dict, create_vrrp_group=False))
+                timeout_dict, create_vrrp_group=False,
+                get_amphorae_status=False))
 
             # #### End of standby ####
 
@@ -682,6 +695,7 @@ class LoadBalancerFlows(object):
                     name=(new_amp_role + '-' +
                           constants.AMPHORA_RELOAD_LISTENER),
                     requires=(constants.LOADBALANCER, constants.AMPHORAE),
+                    rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID},
                     inject={constants.AMPHORA_INDEX: 1,
                             constants.TIMEOUT_DICT: timeout_dict}))
 
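The rebind entries added in these flow hunks remap TaskFlow storage names: each task now declares a new_amphora_id argument, and rebind={constants.NEW_AMPHORA_ID: constants.AMPHORA_ID} feeds it the value the flow stores under amphora_id, presumably the amphora created earlier in the failover. A minimal, self-contained TaskFlow sketch of that mechanism, not Octavia code, with a hypothetical task and stored value:

    # Sketch only: rebind maps the task argument 'new_amphora_id' onto the
    # flow value stored under 'amphora_id'.
    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import linear_flow

    class Echo(task.Task):
        def execute(self, new_amphora_id):
            print('new amphora is', new_amphora_id)

    flow = linear_flow.Flow('demo')
    flow.add(Echo(rebind={'new_amphora_id': 'amphora_id'}))
    engines.run(flow, store={'amphora_id': 'amp-1234'})  # hypothetical ID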
octavia/controller/worker/v2/tasks/amphora_driver_tasks.py

@@ -14,6 +14,9 @@
 #
 
 import copy
+from typing import List
+from typing import Optional
+
 from cryptography import fernet
 from oslo_config import cfg
 from oslo_log import log as logging
@@ -102,10 +105,19 @@ class AmpListenersUpdate(BaseAmphoraTask):
 class AmphoraIndexListenerUpdate(BaseAmphoraTask):
     """Task to update the listeners on one amphora."""
 
-    def execute(self, loadbalancer, amphora_index, amphorae, timeout_dict=()):
+    def execute(self, loadbalancer, amphora_index, amphorae,
+                amphorae_status: dict, new_amphora_id: str, timeout_dict=()):
         # Note, we don't want this to cause a revert as it may be used
         # in a failover flow with both amps failing. Skip it and let
         # health manager fix it.
+
+        amphora_id = amphorae[amphora_index].get(constants.ID)
+        amphora_status = amphorae_status.get(amphora_id, {})
+        if amphora_status.get(constants.UNREACHABLE):
+            LOG.warning("Skipping listener update because amphora %s "
+                        "is not reachable.", amphora_id)
+            return
+
         try:
             # TODO(johnsom) Optimize this to use the dicts and not need the
             # DB lookups
@@ -120,14 +132,16 @@ class AmphoraIndexListenerUpdate(BaseAmphoraTask):
             self.amphora_driver.update_amphora_listeners(
                 db_lb, db_amp, timeout_dict)
         except Exception as e:
-            amphora_id = amphorae[amphora_index].get(constants.ID)
             LOG.error('Failed to update listeners on amphora %s. Skipping '
                       'this amphora as it is failing to update due to: %s',
                       amphora_id, str(e))
-
-
-
-
+            # Update only the status of the newly created amphora during the
+            # failover
+            if amphora_id == new_amphora_id:
+                session = db_apis.get_session()
+                with session.begin():
+                    self.amphora_repo.update(session, amphora_id,
+                                             status=constants.ERROR)
 
 
 class ListenersUpdate(BaseAmphoraTask):
@@ -193,10 +207,18 @@ class AmphoraIndexListenersReload(BaseAmphoraTask):
     """Task to reload all listeners on an amphora."""
 
     def execute(self, loadbalancer, amphora_index, amphorae,
-                timeout_dict=None):
+                amphorae_status: dict, new_amphora_id: str, timeout_dict=None):
         """Execute listener reload routines for listeners on an amphora."""
         if amphorae is None:
             return
+
+        amphora_id = amphorae[amphora_index].get(constants.ID)
+        amphora_status = amphorae_status.get(amphora_id, {})
+        if amphora_status.get(constants.UNREACHABLE):
+            LOG.warning("Skipping listener reload because amphora %s "
+                        "is not reachable.", amphora_id)
+            return
+
         # TODO(johnsom) Optimize this to use the dicts and not need the
         # DB lookups
         session = db_apis.get_session()
@@ -210,13 +232,15 @@ class AmphoraIndexListenersReload(BaseAmphoraTask):
         try:
             self.amphora_driver.reload(db_lb, db_amp, timeout_dict)
         except Exception as e:
-            amphora_id = amphorae[amphora_index][constants.ID]
             LOG.warning('Failed to reload listeners on amphora %s. '
                         'Skipping this amphora as it is failing to '
                         'reload due to: %s', amphora_id, str(e))
-
-
-
+            # Update only the status of the newly created amphora during
+            # the failover
+            if amphora_id == new_amphora_id:
+                with session.begin():
+                    self.amphora_repo.update(session, amphora_id,
+                                             status=constants.ERROR)
 
 
 class ListenerDelete(BaseAmphoraTask):
@@ -478,8 +502,15 @@ class AmphoraUpdateVRRPInterface(BaseAmphoraTask):
 class AmphoraIndexUpdateVRRPInterface(BaseAmphoraTask):
     """Task to get and update the VRRP interface device name from amphora."""
 
-    def execute(self, amphora_index, amphorae, timeout_dict=None):
+    def execute(self, amphora_index, amphorae, amphorae_status: dict,
+                new_amphora_id: str, timeout_dict=None):
         amphora_id = amphorae[amphora_index][constants.ID]
+        amphora_status = amphorae_status.get(amphora_id, {})
+        if amphora_status.get(constants.UNREACHABLE):
+            LOG.warning("Skipping VRRP interface update because amphora %s "
+                        "is not reachable.", amphora_id)
+            return None
+
         try:
             # TODO(johnsom) Optimize this to use the dicts and not need the
             # DB lookups
@@ -494,9 +525,12 @@ class AmphoraIndexUpdateVRRPInterface(BaseAmphoraTask):
             LOG.error('Failed to get amphora VRRP interface on amphora '
                       '%s. Skipping this amphora as it is failing due to: '
                       '%s', amphora_id, str(e))
-
-
-
+            # Update only the status of the newly created amphora during the
+            # failover
+            if amphora_id == new_amphora_id:
+                with session.begin():
+                    self.amphora_repo.update(session, amphora_id,
+                                             status=constants.ERROR)
             return None
 
         with session.begin():
@@ -542,12 +576,19 @@ class AmphoraIndexVRRPUpdate(BaseAmphoraTask):
     """Task to update the VRRP configuration of an amphora."""
 
     def execute(self, loadbalancer_id, amphorae_network_config, amphora_index,
-                amphorae, amp_vrrp_int, timeout_dict=None):
+                amphorae, amphorae_status: dict, amp_vrrp_int: Optional[str],
+                new_amphora_id: str, timeout_dict=None):
         """Execute update_vrrp_conf."""
         # Note, we don't want this to cause a revert as it may be used
         # in a failover flow with both amps failing. Skip it and let
         # health manager fix it.
         amphora_id = amphorae[amphora_index][constants.ID]
+        amphora_status = amphorae_status.get(amphora_id, {})
+        if amphora_status.get(constants.UNREACHABLE):
+            LOG.warning("Skipping VRRP configuration because amphora %s "
+                        "is not reachable.", amphora_id)
+            return
+
         try:
             # TODO(johnsom) Optimize this to use the dicts and not need the
             # DB lookups
@@ -564,9 +605,12 @@ class AmphoraIndexVRRPUpdate(BaseAmphoraTask):
             LOG.error('Failed to update VRRP configuration amphora %s. '
                       'Skipping this amphora as it is failing to update due '
                       'to: %s', amphora_id, str(e))
-
-
-
+            # Update only the status of the newly created amphora during the
+            # failover
+            if amphora_id == new_amphora_id:
+                with session.begin():
+                    self.amphora_repo.update(session, amphora_id,
+                                             status=constants.ERROR)
             return
         LOG.debug("Uploaded VRRP configuration of amphora %s.", amphora_id)
 
@@ -594,10 +638,17 @@ class AmphoraIndexVRRPStart(BaseAmphoraTask):
     This will reload keepalived if it is already running.
     """
 
-    def execute(self, amphora_index, amphorae, timeout_dict=None):
+    def execute(self, amphora_index, amphorae, amphorae_status: dict,
+                new_amphora_id: str, timeout_dict=None):
         # TODO(johnsom) Optimize this to use the dicts and not need the
         # DB lookups
         amphora_id = amphorae[amphora_index][constants.ID]
+        amphora_status = amphorae_status.get(amphora_id, {})
+        if amphora_status.get(constants.UNREACHABLE):
+            LOG.warning("Skipping VRRP start because amphora %s "
+                        "is not reachable.", amphora_id)
+            return
+
         session = db_apis.get_session()
         with session.begin():
             db_amp = self.amphora_repo.get(session, id=amphora_id)
@@ -607,9 +658,12 @@ class AmphoraIndexVRRPStart(BaseAmphoraTask):
             LOG.error('Failed to start VRRP on amphora %s. '
                       'Skipping this amphora as it is failing to start due '
                       'to: %s', amphora_id, str(e))
-
-
-
+            # Update only the status of the newly created amphora during the
+            # failover
+            if amphora_id == new_amphora_id:
+                with session.begin():
+                    self.amphora_repo.update(session, amphora_id,
+                                             status=constants.ERROR)
             return
         LOG.debug("Started VRRP on amphora %s.",
                   amphorae[amphora_index][constants.ID])
@@ -669,3 +723,40 @@ class AmphoraConfigUpdate(BaseAmphoraTask):
                         'update. Please update the amphora image for this '
                         'amphora. Skipping.'.
                         format(amphora.get(constants.ID)))
+
+
+class AmphoraeGetConnectivityStatus(BaseAmphoraTask):
+    """Task that checks amphorae connectivity status.
+
+    Check and return the connectivity status of both amphorae in ACTIVE STANDBY
+    load balancers
+    """
+
+    def execute(self, amphorae: List[dict], new_amphora_id: str,
+                timeout_dict=None):
+        amphorae_status = {}
+
+        for amphora in amphorae:
+            amphora_id = amphora[constants.ID]
+            amphorae_status[amphora_id] = {}
+
+            session = db_apis.get_session()
+            with session.begin():
+                db_amp = self.amphora_repo.get(session, id=amphora_id)
+
+            try:
+                # Verify if the amphora is reachable
+                self.amphora_driver.check(db_amp, timeout_dict=timeout_dict)
+            except Exception as e:
+                LOG.exception("Cannot get status for amphora %s",
+                              amphora_id)
+                # In case it fails and the tested amphora is the newly created
+                # amphora, it's not a normal error handling, re-raise the
+                # exception
+                if amphora_id == new_amphora_id:
+                    raise e
+                amphorae_status[amphora_id][constants.UNREACHABLE] = True
+            else:
+                amphorae_status[amphora_id][constants.UNREACHABLE] = False
+
+        return amphorae_status
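Taken together with the earlier hunks, AmphoraeGetConnectivityStatus probes each amphora with amphora_driver.check(), re-raises only when the newly created amphora is the unreachable one, and publishes the resulting map as constants.AMPHORAE_STATUS; the per-amphora tasks above then skip unreachable peers instead of failing the whole failover. A stand-alone sketch of that contract, using plain dicts and string keys as stand-ins for the Octavia constants and task plumbing:

    # Sketch only: the amphora IDs are hypothetical, and 'id'/'unreachable'
    # stand in for constants.ID and constants.UNREACHABLE.
    amphorae = [{'id': 'amp-1111'}, {'id': 'amp-2222'}]
    amphorae_status = {
        'amp-1111': {'unreachable': False},
        'amp-2222': {'unreachable': True},
    }

    for index, amphora in enumerate(amphorae):
        amphora_id = amphora['id']
        if amphorae_status.get(amphora_id, {}).get('unreachable'):
            # Skip the unreachable peer; the health manager repairs it later.
            print('skipping amphora', amphora_id)
            continue
        print('updating listeners on amphora', amphora_id)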
octavia/controller/worker/v2/tasks/database_tasks.py

@@ -130,13 +130,17 @@ class CreateAmphoraInDB(BaseDatabaseTask):
         LOG.warning("Reverting create amphora in DB for amp id %s ", result)
 
         # Delete the amphora for now. May want to just update status later
-
-
+        with db_apis.session().begin() as session:
+            try:
                 self.amphora_repo.delete(session, id=result)
-
-
-
-
+            except Exception as e:
+                LOG.error("Failed to delete amphora %(amp)s "
+                          "in the database due to: "
+                          "%(except)s", {'amp': result, 'except': str(e)})
+            try:
+                self.amp_health_repo.delete(session, amphora_id=result)
+            except Exception:
+                pass
 
 
 class MarkLBAmphoraeDeletedInDB(BaseDatabaseTask):
@@ -2621,15 +2625,13 @@ class DecrementHealthMonitorQuota(BaseDatabaseTask):
 
         try:
             session = db_apis.get_session()
-            lock_session = db_apis.get_session()
             try:
                 self.repos.check_quota_met(session,
-                                           lock_session,
                                            data_models.HealthMonitor,
                                            project_id)
-
+                session.commit()
             except Exception:
-
+                session.rollback()
         except Exception:
             # Don't fail the revert flow
             pass
@@ -2679,15 +2681,13 @@ class DecrementListenerQuota(BaseDatabaseTask):
 
         try:
             session = db_apis.get_session()
-            lock_session = db_apis.get_session()
             try:
                 self.repos.check_quota_met(session,
-                                           lock_session,
                                            data_models.Listener,
                                            project_id)
-
+                session.commit()
             except Exception:
-
+                session.rollback()
         except Exception:
             # Don't fail the revert flow
             pass
@@ -2739,15 +2739,13 @@ class DecrementLoadBalancerQuota(BaseDatabaseTask):
 
         try:
             session = db_apis.get_session()
-            lock_session = db_apis.get_session()
             try:
                 self.repos.check_quota_met(session,
-                                           lock_session,
                                            data_models.LoadBalancer,
                                            project_id)
-
+                session.commit()
             except Exception:
-
+                session.rollback()
         except Exception:
             # Don't fail the revert flow
             pass
@@ -2798,15 +2796,13 @@ class DecrementMemberQuota(BaseDatabaseTask):
 
         try:
             session = db_apis.get_session()
-            lock_session = db_apis.get_session()
             try:
                 self.repos.check_quota_met(session,
-                                           lock_session,
                                            data_models.Member,
                                            project_id)
-
+                session.commit()
             except Exception:
-
+                session.rollback()
         except Exception:
             # Don't fail the revert flow
             pass
@@ -2871,42 +2867,38 @@ class DecrementPoolQuota(BaseDatabaseTask):
         # in case other quota actions have occurred
         try:
             session = db_apis.get_session()
-            lock_session = db_apis.get_session()
             try:
                 self.repos.check_quota_met(session,
-                                           lock_session,
                                            data_models.Pool,
                                            project_id)
-
+                session.commit()
             except Exception:
-
+                session.rollback()
 
             # Attempt to increment back the health monitor quota
             if pool_child_count['HM'] > 0:
-
+                session = db_apis.get_session()
                 try:
                     self.repos.check_quota_met(session,
-                                               lock_session,
                                                data_models.HealthMonitor,
                                                project_id)
-
+                    session.commit()
                 except Exception:
-
+                    session.rollback()
 
             # Attempt to increment back the member quota
             # This is separate calls to maximize the correction
             # should other factors have increased the in use quota
             # before this point in the revert flow
             for i in range(pool_child_count['member']):
-
+                session = db_apis.get_session()
                 try:
                     self.repos.check_quota_met(session,
-                                               lock_session,
                                                data_models.Member,
                                                project_id)
-
+                    session.commit()
                 except Exception:
-
+                    session.rollback()
         except Exception:
             # Don't fail the revert flow
             pass
@@ -2954,7 +2946,8 @@ class DecrementL7policyQuota(BaseDatabaseTask):
                                        data_models.L7Policy,
                                        l7policy[constants.PROJECT_ID])
             db_l7policy = self.l7policy_repo.get(
-
+                lock_session,
+                id=l7policy[constants.L7POLICY_ID])
 
             if db_l7policy and db_l7policy.l7rules:
                 self.repos.decrement_quota(lock_session,
@@ -2982,28 +2975,26 @@ class DecrementL7policyQuota(BaseDatabaseTask):
         if not isinstance(result, failure.Failure):
             try:
                 session = db_apis.get_session()
-                lock_session = db_apis.get_session()
                 try:
                     self.repos.check_quota_met(session,
-                                               lock_session,
                                                data_models.L7Policy,
                                                l7policy[constants.PROJECT_ID])
-
+                    session.commit()
                 except Exception:
-
+                    session.rollback()
                 db_l7policy = self.l7policy_repo.get(
                     session, id=l7policy[constants.L7POLICY_ID])
                 if db_l7policy:
                     # Attempt to increment back the L7Rule quota
                     for i in range(len(db_l7policy.l7rules)):
-
+                        session = db_apis.get_session()
                         try:
                             self.repos.check_quota_met(
-                                session,
+                                session, data_models.L7Rule,
                                 db_l7policy.project_id)
-
+                            session.commit()
                         except Exception:
-
+                            session.rollback()
             except Exception:
                 # Don't fail the revert flow
                 pass
@@ -3054,15 +3045,13 @@ class DecrementL7ruleQuota(BaseDatabaseTask):
 
         try:
             session = db_apis.get_session()
-            lock_session = db_apis.get_session()
             try:
                 self.repos.check_quota_met(session,
-                                           lock_session,
                                            data_models.L7Rule,
                                            l7rule[constants.PROJECT_ID])
-
+                session.commit()
             except Exception:
-
+                session.rollback()
         except Exception:
             # Don't fail the revert flow
             pass
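Across the Decrement*Quota reverts above the change is uniform: the separate lock_session is dropped, and each check_quota_met() call is followed by an explicit commit, or a rollback on failure, on the single session. A condensed sketch of that revert pattern, with the repository and session factory passed in as stand-ins for Octavia's repos and db_apis helpers:

    # Sketch only: single-session quota revert with explicit commit/rollback.
    def revert_quota(repos, db_apis, model, project_id):
        try:
            session = db_apis.get_session()
            try:
                # check_quota_met is used here to add the usage back; commit
                # it, or roll back if the bookkeeping fails.
                repos.check_quota_met(session, model, project_id)
                session.commit()
            except Exception:
                session.rollback()
        except Exception:
            # Never fail the revert flow over quota bookkeeping.
            pass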