octavia 12.0.0.0rc2__py3-none-any.whl → 13.0.0.0rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- octavia/amphorae/backends/agent/api_server/osutils.py +1 -0
- octavia/amphorae/backends/agent/api_server/plug.py +21 -7
- octavia/amphorae/backends/agent/api_server/templates/amphora-netns.systemd.j2 +2 -2
- octavia/amphorae/backends/agent/api_server/util.py +21 -0
- octavia/amphorae/backends/health_daemon/health_daemon.py +9 -3
- octavia/amphorae/backends/health_daemon/health_sender.py +2 -0
- octavia/amphorae/backends/utils/interface.py +14 -6
- octavia/amphorae/backends/utils/interface_file.py +6 -3
- octavia/amphorae/backends/utils/keepalivedlvs_query.py +8 -9
- octavia/amphorae/drivers/driver_base.py +1 -2
- octavia/amphorae/drivers/haproxy/rest_api_driver.py +11 -25
- octavia/amphorae/drivers/health/heartbeat_udp.py +34 -24
- octavia/amphorae/drivers/keepalived/jinja/jinja_cfg.py +3 -12
- octavia/amphorae/drivers/noop_driver/driver.py +3 -5
- octavia/api/common/pagination.py +4 -4
- octavia/api/drivers/amphora_driver/v2/driver.py +11 -5
- octavia/api/drivers/driver_agent/driver_get.py +22 -14
- octavia/api/drivers/driver_agent/driver_updater.py +8 -4
- octavia/api/drivers/utils.py +4 -2
- octavia/api/healthcheck/healthcheck_plugins.py +4 -2
- octavia/api/root_controller.py +4 -1
- octavia/api/v2/controllers/amphora.py +35 -38
- octavia/api/v2/controllers/availability_zone_profiles.py +43 -33
- octavia/api/v2/controllers/availability_zones.py +22 -18
- octavia/api/v2/controllers/flavor_profiles.py +37 -28
- octavia/api/v2/controllers/flavors.py +19 -15
- octavia/api/v2/controllers/health_monitor.py +44 -33
- octavia/api/v2/controllers/l7policy.py +52 -40
- octavia/api/v2/controllers/l7rule.py +68 -55
- octavia/api/v2/controllers/listener.py +88 -61
- octavia/api/v2/controllers/load_balancer.py +52 -34
- octavia/api/v2/controllers/member.py +63 -52
- octavia/api/v2/controllers/pool.py +55 -42
- octavia/api/v2/controllers/quotas.py +5 -3
- octavia/api/v2/types/listener.py +15 -0
- octavia/cmd/octavia_worker.py +0 -3
- octavia/cmd/status.py +1 -4
- octavia/common/clients.py +25 -45
- octavia/common/config.py +64 -22
- octavia/common/constants.py +3 -2
- octavia/common/data_models.py +7 -1
- octavia/common/jinja/haproxy/combined_listeners/jinja_cfg.py +12 -1
- octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 +5 -2
- octavia/common/jinja/lvs/jinja_cfg.py +4 -2
- octavia/common/keystone.py +58 -5
- octavia/common/validate.py +35 -0
- octavia/compute/drivers/noop_driver/driver.py +6 -0
- octavia/controller/healthmanager/health_manager.py +3 -6
- octavia/controller/housekeeping/house_keeping.py +36 -37
- octavia/controller/worker/amphora_rate_limit.py +5 -4
- octavia/controller/worker/task_utils.py +57 -41
- octavia/controller/worker/v2/controller_worker.py +160 -103
- octavia/controller/worker/v2/flows/listener_flows.py +3 -0
- octavia/controller/worker/v2/flows/load_balancer_flows.py +9 -14
- octavia/controller/worker/v2/tasks/amphora_driver_tasks.py +152 -91
- octavia/controller/worker/v2/tasks/compute_tasks.py +4 -2
- octavia/controller/worker/v2/tasks/database_tasks.py +542 -400
- octavia/controller/worker/v2/tasks/network_tasks.py +119 -79
- octavia/db/api.py +26 -23
- octavia/db/base_models.py +2 -2
- octavia/db/healthcheck.py +2 -1
- octavia/db/migration/alembic_migrations/versions/632152d2d32e_add_http_strict_transport_security_.py +42 -0
- octavia/db/models.py +12 -2
- octavia/db/prepare.py +2 -0
- octavia/db/repositories.py +462 -482
- octavia/hacking/checks.py +1 -1
- octavia/network/base.py +0 -14
- octavia/network/drivers/neutron/allowed_address_pairs.py +92 -135
- octavia/network/drivers/neutron/base.py +65 -77
- octavia/network/drivers/neutron/utils.py +69 -85
- octavia/network/drivers/noop_driver/driver.py +0 -7
- octavia/statistics/drivers/update_db.py +10 -10
- octavia/tests/common/constants.py +91 -84
- octavia/tests/common/sample_data_models.py +13 -1
- octavia/tests/fixtures.py +32 -0
- octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py +9 -10
- octavia/tests/functional/api/drivers/driver_agent/test_driver_agent.py +260 -15
- octavia/tests/functional/api/test_root_controller.py +3 -28
- octavia/tests/functional/api/v2/base.py +5 -3
- octavia/tests/functional/api/v2/test_amphora.py +18 -5
- octavia/tests/functional/api/v2/test_availability_zone_profiles.py +1 -0
- octavia/tests/functional/api/v2/test_listener.py +51 -19
- octavia/tests/functional/api/v2/test_load_balancer.py +10 -1
- octavia/tests/functional/db/base.py +31 -16
- octavia/tests/functional/db/test_models.py +27 -28
- octavia/tests/functional/db/test_repositories.py +407 -50
- octavia/tests/unit/amphorae/backends/agent/api_server/test_amphora_info.py +2 -0
- octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py +1 -1
- octavia/tests/unit/amphorae/backends/agent/api_server/test_plug.py +54 -6
- octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py +35 -0
- octavia/tests/unit/amphorae/backends/health_daemon/test_health_daemon.py +8 -0
- octavia/tests/unit/amphorae/backends/health_daemon/test_health_sender.py +18 -0
- octavia/tests/unit/amphorae/backends/utils/test_interface.py +81 -0
- octavia/tests/unit/amphorae/backends/utils/test_interface_file.py +2 -0
- octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py +129 -5
- octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py +42 -20
- octavia/tests/unit/amphorae/drivers/health/test_heartbeat_udp.py +18 -20
- octavia/tests/unit/amphorae/drivers/keepalived/jinja/test_jinja_cfg.py +4 -4
- octavia/tests/unit/amphorae/drivers/noop_driver/test_driver.py +4 -1
- octavia/tests/unit/api/drivers/driver_agent/test_driver_get.py +3 -3
- octavia/tests/unit/api/drivers/driver_agent/test_driver_updater.py +11 -13
- octavia/tests/unit/base.py +6 -0
- octavia/tests/unit/cmd/test_interface.py +2 -2
- octavia/tests/unit/cmd/test_status.py +2 -2
- octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py +152 -1
- octavia/tests/unit/common/sample_configs/sample_configs_combined.py +10 -3
- octavia/tests/unit/common/test_clients.py +0 -39
- octavia/tests/unit/common/test_keystone.py +54 -0
- octavia/tests/unit/common/test_validate.py +67 -0
- octavia/tests/unit/controller/healthmanager/test_health_manager.py +8 -22
- octavia/tests/unit/controller/housekeeping/test_house_keeping.py +3 -64
- octavia/tests/unit/controller/worker/test_amphora_rate_limit.py +1 -1
- octavia/tests/unit/controller/worker/test_task_utils.py +44 -24
- octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py +0 -1
- octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py +49 -26
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py +399 -196
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py +37 -64
- octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py +3 -14
- octavia/tests/unit/controller/worker/v2/test_controller_worker.py +2 -2
- octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py +456 -561
- octavia/tests/unit/network/drivers/neutron/test_base.py +181 -194
- octavia/tests/unit/network/drivers/neutron/test_utils.py +14 -30
- octavia/tests/unit/statistics/drivers/test_update_db.py +7 -5
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/README.rst +1 -1
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/AUTHORS +4 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/METADATA +4 -4
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/RECORD +141 -189
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/entry_points.txt +1 -2
- octavia-13.0.0.0rc1.dist-info/pbr.json +1 -0
- octavia/api/drivers/amphora_driver/v1/__init__.py +0 -11
- octavia/api/drivers/amphora_driver/v1/driver.py +0 -547
- octavia/controller/queue/v1/__init__.py +0 -11
- octavia/controller/queue/v1/consumer.py +0 -64
- octavia/controller/queue/v1/endpoints.py +0 -160
- octavia/controller/worker/v1/__init__.py +0 -11
- octavia/controller/worker/v1/controller_worker.py +0 -1157
- octavia/controller/worker/v1/flows/__init__.py +0 -11
- octavia/controller/worker/v1/flows/amphora_flows.py +0 -610
- octavia/controller/worker/v1/flows/health_monitor_flows.py +0 -105
- octavia/controller/worker/v1/flows/l7policy_flows.py +0 -94
- octavia/controller/worker/v1/flows/l7rule_flows.py +0 -100
- octavia/controller/worker/v1/flows/listener_flows.py +0 -128
- octavia/controller/worker/v1/flows/load_balancer_flows.py +0 -692
- octavia/controller/worker/v1/flows/member_flows.py +0 -230
- octavia/controller/worker/v1/flows/pool_flows.py +0 -127
- octavia/controller/worker/v1/tasks/__init__.py +0 -11
- octavia/controller/worker/v1/tasks/amphora_driver_tasks.py +0 -453
- octavia/controller/worker/v1/tasks/cert_task.py +0 -51
- octavia/controller/worker/v1/tasks/compute_tasks.py +0 -335
- octavia/controller/worker/v1/tasks/database_tasks.py +0 -2756
- octavia/controller/worker/v1/tasks/lifecycle_tasks.py +0 -173
- octavia/controller/worker/v1/tasks/model_tasks.py +0 -41
- octavia/controller/worker/v1/tasks/network_tasks.py +0 -970
- octavia/controller/worker/v1/tasks/retry_tasks.py +0 -74
- octavia/tests/unit/api/drivers/amphora_driver/v1/__init__.py +0 -11
- octavia/tests/unit/api/drivers/amphora_driver/v1/test_driver.py +0 -824
- octavia/tests/unit/controller/queue/v1/__init__.py +0 -11
- octavia/tests/unit/controller/queue/v1/test_consumer.py +0 -61
- octavia/tests/unit/controller/queue/v1/test_endpoints.py +0 -189
- octavia/tests/unit/controller/worker/v1/__init__.py +0 -11
- octavia/tests/unit/controller/worker/v1/flows/__init__.py +0 -11
- octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py +0 -474
- octavia/tests/unit/controller/worker/v1/flows/test_health_monitor_flows.py +0 -72
- octavia/tests/unit/controller/worker/v1/flows/test_l7policy_flows.py +0 -67
- octavia/tests/unit/controller/worker/v1/flows/test_l7rule_flows.py +0 -67
- octavia/tests/unit/controller/worker/v1/flows/test_listener_flows.py +0 -91
- octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py +0 -431
- octavia/tests/unit/controller/worker/v1/flows/test_member_flows.py +0 -106
- octavia/tests/unit/controller/worker/v1/flows/test_pool_flows.py +0 -77
- octavia/tests/unit/controller/worker/v1/tasks/__init__.py +0 -11
- octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py +0 -792
- octavia/tests/unit/controller/worker/v1/tasks/test_cert_task.py +0 -46
- octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py +0 -634
- octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py +0 -2615
- octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks_quota.py +0 -415
- octavia/tests/unit/controller/worker/v1/tasks/test_lifecycle_tasks.py +0 -401
- octavia/tests/unit/controller/worker/v1/tasks/test_model_tasks.py +0 -44
- octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py +0 -1788
- octavia/tests/unit/controller/worker/v1/tasks/test_retry_tasks.py +0 -47
- octavia/tests/unit/controller/worker/v1/test_controller_worker.py +0 -2096
- octavia-12.0.0.0rc2.dist-info/pbr.json +0 -1
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/LICENSE +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/README.rst +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/diskimage-create.sh +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/image-tests.sh +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/requirements.txt +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/test-requirements.txt +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/tox.ini +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/version.txt +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/scripts/octavia-wsgi +0 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/LICENSE +0 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/WHEEL +0 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/top_level.txt +0 -0
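The largest block of changes below, in octavia/controller/worker/v2/tasks/database_tasks.py, applies one pattern throughout: each task now opens an explicit SQLAlchemy transaction with `db_apis.session().begin()` and passes that session as the first argument to every repository call, instead of letting each call manage its own session. A minimal sketch of that pattern precedes the hunks; the `mark_amphora_deleted` helper is illustrative only and is not part of Octavia.

```python
# Sketch of the session-scoping pattern used throughout the hunks below;
# this is not the actual Octavia task code.
from octavia.common import constants
from octavia.db import api as db_apis
from octavia.db import repositories as repo


def mark_amphora_deleted(amphora_id):
    amphora_repo = repo.AmphoraRepository()
    # One transaction per task step: it commits when the block exits
    # cleanly, rolls back if the block raises, and the same session is
    # handed to every repository call made inside the block.
    with db_apis.session().begin() as session:
        amphora_repo.update(session, amphora_id, status=constants.DELETED)
```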
octavia/controller/worker/v2/tasks/database_tasks.py

@@ -55,28 +55,28 @@ class BaseDatabaseTask(task.Task):
         self.task_utils = task_utilities.TaskUtils()
         super().__init__(**kwargs)

-    def _delete_from_amp_health(self, amphora_id):
+    def _delete_from_amp_health(self, session, amphora_id):
         """Delete the amphora_health record for an amphora.

         :param amphora_id: The amphora id to delete
         """
         LOG.debug('Disabling health monitoring on amphora: %s', amphora_id)
         try:
-            self.amp_health_repo.delete(
+            self.amp_health_repo.delete(session,
                                         amphora_id=amphora_id)
         except (sqlalchemy.orm.exc.NoResultFound,
                 sqlalchemy.orm.exc.UnmappedInstanceError):
             LOG.debug('No existing amphora health record to delete '
                       'for amphora: %s, skipping.', amphora_id)

-    def _mark_amp_health_busy(self, amphora_id):
+    def _mark_amp_health_busy(self, session, amphora_id):
         """Mark the amphora_health record busy for an amphora.

         :param amphora_id: The amphora id to mark busy
         """
         LOG.debug('Marking health monitoring busy on amphora: %s', amphora_id)
         try:
-            self.amp_health_repo.update(
+            self.amp_health_repo.update(session,
                                         amphora_id=amphora_id,
                                         busy=True)
         except (sqlalchemy.orm.exc.NoResultFound,

@@ -94,11 +94,13 @@ class CreateAmphoraInDB(BaseDatabaseTask):
         :returns: The created amphora object
         """

-
-
-
-
-
+        with db_apis.session().begin() as session:
+            amphora = self.amphora_repo.create(
+                session,
+                id=uuidutils.generate_uuid(),
+                load_balancer_id=loadbalancer_id,
+                status=constants.PENDING_CREATE,
+                cert_busy=False)
         if loadbalancer_id:
             LOG.info("Created Amphora %s in DB for load balancer %s",
                      amphora.id, loadbalancer_id)

@@ -129,7 +131,8 @@ class CreateAmphoraInDB(BaseDatabaseTask):

         # Delete the amphora for now. May want to just update status later
         try:
-
+            with db_apis.session().begin() as session:
+                self.amphora_repo.delete(session, id=result)
         except Exception as e:
             LOG.error("Failed to delete amphora %(amp)s "
                       "in the database due to: "

@@ -146,12 +149,13 @@ class MarkLBAmphoraeDeletedInDB(BaseDatabaseTask):
                    marked DELETED.
         :returns: None
         """
-
-
-
-
-
-
+        with db_apis.session().begin() as session:
+            db_lb = self.repos.load_balancer.get(
+                session, id=loadbalancer[constants.LOADBALANCER_ID])
+            for amp in db_lb.amphorae:
+                LOG.debug("Marking amphora %s DELETED ", amp.id)
+                self.amphora_repo.update(session,
+                                         id=amp.id, status=constants.DELETED)


 class DeleteHealthMonitorInDB(BaseDatabaseTask):
@@ -170,9 +174,10 @@ class DeleteHealthMonitorInDB(BaseDatabaseTask):
         LOG.debug("DB delete health monitor: %s ",
                   health_mon[constants.HEALTHMONITOR_ID])
         try:
-
-
-
+            with db_apis.session().begin() as session:
+                self.health_mon_repo.delete(
+                    session,
+                    id=health_mon[constants.HEALTHMONITOR_ID])
         except exc.NoResultFound:
             # ignore if the HealthMonitor was not found
             pass

@@ -187,9 +192,11 @@ class DeleteHealthMonitorInDB(BaseDatabaseTask):
         LOG.warning("Reverting mark health monitor delete in DB "
                     "for health monitor with id %s",
                     health_mon[constants.HEALTHMONITOR_ID])
-
-
-
+        with db_apis.session().begin() as session:
+            self.health_mon_repo.update(
+                session,
+                id=health_mon[constants.HEALTHMONITOR_ID],
+                provisioning_status=constants.ERROR)


 class DeleteHealthMonitorInDBByPool(DeleteHealthMonitorInDB):

@@ -204,10 +211,11 @@ class DeleteHealthMonitorInDBByPool(DeleteHealthMonitorInDB):
         :param pool_id: ID of pool which health monitor should be deleted.
         :returns: None
         """
-
-
-
-
+        with db_apis.session().begin() as session:
+            db_pool = self.pool_repo.get(session,
+                                         id=pool_id)
+            provider_hm = provider_utils.db_HM_to_provider_HM(
+                db_pool.health_monitor).to_dict()
         super().execute(
             provider_hm)

@@ -217,8 +225,9 @@ class DeleteHealthMonitorInDBByPool(DeleteHealthMonitorInDB):
         :param pool_id: ID of pool which health monitor couldn't be deleted
         :returns: None
         """
-
-
+        with db_apis.session().begin() as session:
+            db_pool = self.pool_repo.get(session,
+                                         id=pool_id)
         provider_hm = provider_utils.db_HM_to_provider_HM(
             db_pool.health_monitor).to_dict()
         super().revert(

@@ -239,8 +248,9 @@ class DeleteMemberInDB(BaseDatabaseTask):
         """

         LOG.debug("DB delete member for id: %s ", member[constants.MEMBER_ID])
-
-
+        with db_apis.session().begin() as session:
+            self.member_repo.delete(session,
+                                    id=member[constants.MEMBER_ID])

     def revert(self, member, *args, **kwargs):
         """Mark the member ERROR since the delete couldn't happen

@@ -252,9 +262,10 @@ class DeleteMemberInDB(BaseDatabaseTask):
         LOG.warning("Reverting delete in DB for member id %s",
                     member[constants.MEMBER_ID])
         try:
-
-
-
+            with db_apis.session().begin() as session:
+                self.member_repo.update(session,
+                                        member[constants.MEMBER_ID],
+                                        provisioning_status=constants.ERROR)
         except Exception as e:
             LOG.error("Failed to update member %(mem)s "
                       "provisioning_status to ERROR due to: %(except)s",

@@ -272,8 +283,9 @@ class DeleteListenerInDB(BaseDatabaseTask):
         """
         LOG.debug("Delete in DB for listener id: %s",
                   listener[constants.LISTENER_ID])
-
-
+        with db_apis.session().begin() as session:
+            self.listener_repo.delete(session,
+                                      id=listener[constants.LISTENER_ID])

     def revert(self, listener, *args, **kwargs):
         """Mark the listener ERROR since the listener didn't delete

@@ -301,7 +313,8 @@ class DeletePoolInDB(BaseDatabaseTask):
         """

         LOG.debug("Delete in DB for pool id: %s ", pool_id)
-
+        with db_apis.session().begin() as session:
+            self.pool_repo.delete(session, id=pool_id)

     def revert(self, pool_id, *args, **kwargs):
         """Mark the pool ERROR since the delete couldn't happen
@@ -312,8 +325,9 @@ class DeletePoolInDB(BaseDatabaseTask):

         LOG.warning("Reverting delete in DB for pool id %s", pool_id)
         try:
-
-
+            with db_apis.session().begin() as session:
+                self.pool_repo.update(session, pool_id,
+                                      provisioning_status=constants.ERROR)
         except Exception as e:
             LOG.error("Failed to update pool %(pool)s "
                       "provisioning_status to ERROR due to: %(except)s",

@@ -335,8 +349,9 @@ class DeleteL7PolicyInDB(BaseDatabaseTask):

         LOG.debug("Delete in DB for l7policy id: %s ",
                   l7policy[constants.L7POLICY_ID])
-
-
+        with db_apis.session().begin() as session:
+            self.l7policy_repo.delete(session,
+                                      id=l7policy[constants.L7POLICY_ID])

     def revert(self, l7policy, *args, **kwargs):
         """Mark the l7policy ERROR since the delete couldn't happen

@@ -348,9 +363,10 @@ class DeleteL7PolicyInDB(BaseDatabaseTask):
         LOG.warning("Reverting delete in DB for l7policy id %s",
                     l7policy[constants.L7POLICY_ID])
         try:
-
-
-
+            with db_apis.session().begin() as session:
+                self.l7policy_repo.update(session,
+                                          l7policy[constants.L7POLICY_ID],
+                                          provisioning_status=constants.ERROR)
         except Exception as e:
             LOG.error("Failed to update l7policy %(l7policy)s "
                       "provisioning_status to ERROR due to: %(except)s",

@@ -373,8 +389,9 @@ class DeleteL7RuleInDB(BaseDatabaseTask):

         LOG.debug("Delete in DB for l7rule id: %s",
                   l7rule[constants.L7RULE_ID])
-
-
+        with db_apis.session().begin() as session:
+            self.l7rule_repo.delete(session,
+                                    id=l7rule[constants.L7RULE_ID])

     def revert(self, l7rule, *args, **kwargs):
         """Mark the l7rule ERROR since the delete couldn't happen

@@ -386,9 +403,10 @@ class DeleteL7RuleInDB(BaseDatabaseTask):
         LOG.warning("Reverting delete in DB for l7rule id %s",
                     l7rule[constants.L7RULE_ID])
         try:
-
-
-
+            with db_apis.session().begin() as session:
+                self.l7rule_repo.update(session,
+                                        l7rule[constants.L7RULE_ID],
+                                        provisioning_status=constants.ERROR)
         except Exception as e:
             LOG.error("Failed to update l7rule %(l7rule)s "
                       "provisioning_status to ERROR due to: %(except)s",

@@ -408,8 +426,9 @@ class ReloadAmphora(BaseDatabaseTask):

         LOG.debug("Get amphora from DB for amphora id: %s ",
                   amphora[constants.ID])
-
-
+        with db_apis.session().begin() as session:
+            return self.amphora_repo.get(
+                session, id=amphora[constants.ID]).to_dict()


 class ReloadLoadBalancer(BaseDatabaseTask):

@@ -424,8 +443,9 @@ class ReloadLoadBalancer(BaseDatabaseTask):

         LOG.debug("Get load balancer from DB for load balancer id: %s ",
                   loadbalancer_id)
-
-
+        with db_apis.session().begin() as session:
+            db_lb = self.loadbalancer_repo.get(session,
+                                               id=loadbalancer_id)
         lb_dict = provider_utils.db_loadbalancer_to_provider_loadbalancer(
             db_lb)
         return lb_dict.to_dict()

@@ -442,12 +462,13 @@ class UpdateVIPAfterAllocation(BaseDatabaseTask):
         :param vip: data_models.Vip object with update data.
         :returns: The load balancer object.
         """
-
-
-
-
-
-
+        with db_apis.session().begin() as session:
+            self.repos.vip.update(session, loadbalancer_id,
+                                  port_id=vip[constants.PORT_ID],
+                                  subnet_id=vip[constants.SUBNET_ID],
+                                  ip_address=vip[constants.IP_ADDRESS])
+            db_lb = self.repos.load_balancer.get(session,
+                                                 id=loadbalancer_id)
         prov_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
             db_lb)
         LOG.info("Updated vip with port id %s, subnet id %s, ip address %s "
@@ -471,18 +492,19 @@ class UpdateAdditionalVIPsAfterAllocation(BaseDatabaseTask):
                     data.
         :returns: The load balancer object.
         """
-
-
-
-
-
-
-
-
-
-
-
-
+        with db_apis.session().begin() as session:
+            for vip in additional_vips:
+                LOG.info("Updating additional VIP with subnet_id %s, "
+                         "ip_address %s for load balancer %s",
+                         vip[constants.SUBNET_ID], vip[constants.IP_ADDRESS],
+                         loadbalancer_id)
+                self.repos.additional_vip.update(
+                    session, loadbalancer_id,
+                    vip[constants.SUBNET_ID],
+                    ip_address=vip[constants.IP_ADDRESS],
+                    port_id=vip[constants.PORT_ID])
+            db_lb = self.repos.load_balancer.get(session,
+                                                 id=loadbalancer_id)
         return provider_utils.db_loadbalancer_to_provider_loadbalancer(
             db_lb).to_dict()


@@ -496,15 +518,16 @@ class UpdateAmphoraeVIPData(BaseDatabaseTask):
         :param amps_data: Amphorae update dicts.
         :returns: None
         """
-
-
-
-
-
-
-
-
-
+        with db_apis.session().begin() as session:
+            for amp_data in amps_data:
+                self.repos.amphora.update(
+                    session,
+                    amp_data.get(constants.ID),
+                    vrrp_ip=amp_data[constants.VRRP_IP],
+                    ha_ip=amp_data[constants.HA_IP],
+                    vrrp_port_id=amp_data[constants.VRRP_PORT_ID],
+                    ha_port_id=amp_data[constants.HA_PORT_ID],
+                    vrrp_id=1)


 class UpdateAmphoraVIPData(BaseDatabaseTask):

@@ -516,14 +539,15 @@ class UpdateAmphoraVIPData(BaseDatabaseTask):
         :param amps_data: Amphorae update dicts.
         :returns: None
         """
-
-
-
-
-
-
-
-
+        with db_apis.session().begin() as session:
+            self.repos.amphora.update(
+                session,
+                amp_data.get(constants.ID),
+                vrrp_ip=amp_data[constants.VRRP_IP],
+                ha_ip=amp_data[constants.HA_IP],
+                vrrp_port_id=amp_data[constants.VRRP_PORT_ID],
+                ha_port_id=amp_data[constants.HA_PORT_ID],
+                vrrp_id=1)


 class UpdateAmpFailoverDetails(BaseDatabaseTask):

@@ -538,17 +562,19 @@ class UpdateAmpFailoverDetails(BaseDatabaseTask):
         :returns: None
         """
         # role and vrrp_priority will be updated later.
-
-
-
-
-
-
-
-
-
-
-
+        with db_apis.session().begin() as session:
+            self.repos.amphora.update(
+                session,
+                amphora.get(constants.ID),
+                # TODO(johnsom) We should do a better job getting the fixed_ip
+                # as this could be a problem with dual stack.
+                # Fix this during the multi-vip patch.
+                vrrp_ip=(
+                    base_port[constants.FIXED_IPS][0][constants.IP_ADDRESS]),
+                ha_ip=vip[constants.IP_ADDRESS],
+                vrrp_port_id=base_port[constants.ID],
+                ha_port_id=vip[constants.PORT_ID],
+                vrrp_id=1)


 class AssociateFailoverAmphoraWithLBID(BaseDatabaseTask):
@@ -562,9 +588,10 @@ class AssociateFailoverAmphoraWithLBID(BaseDatabaseTask):
                            a given amphora.
         :returns: None
         """
-
-
-
+        with db_apis.session().begin() as session:
+            self.repos.amphora.associate(session,
+                                         load_balancer_id=loadbalancer_id,
+                                         amphora_id=amphora_id)

     def revert(self, amphora_id, *args, **kwargs):
         """Remove amphora-load balancer association.

@@ -574,8 +601,9 @@ class AssociateFailoverAmphoraWithLBID(BaseDatabaseTask):
         :returns: None
         """
         try:
-
-
+            with db_apis.session().begin() as session:
+                self.repos.amphora.update(session, amphora_id,
+                                          loadbalancer_id=None)
         except Exception as e:
             LOG.error("Failed to update amphora %(amp)s "
                       "load balancer id to None due to: "

@@ -595,8 +623,9 @@ class _MarkAmphoraRoleAndPriorityInDB(BaseDatabaseTask):
         """
         LOG.debug("Mark %(role)s in DB for amphora: %(amp)s",
                   {constants.ROLE: amp_role, 'amp': amphora_id})
-
-
+        with db_apis.session().begin() as session:
+            self.amphora_repo.update(session, amphora_id, role=amp_role,
+                                     vrrp_priority=vrrp_priority)

     def _revert(self, result, amphora_id, *args, **kwargs):
         """Removes role and vrrp_priority association.

@@ -613,8 +642,9 @@ class _MarkAmphoraRoleAndPriorityInDB(BaseDatabaseTask):
         LOG.warning("Reverting amphora role in DB for amp id %(amp)s",
                     {'amp': amphora_id})
         try:
-
-
+            with db_apis.session().begin() as session:
+                self.amphora_repo.update(session, amphora_id,
+                                         role=None, vrrp_priority=None)
         except Exception as e:
             LOG.error("Failed to update amphora %(amp)s "
                       "role and vrrp_priority to None due to: "

@@ -709,13 +739,14 @@ class MarkAmphoraAllocatedInDB(BaseDatabaseTask):
                       'comp': amphora[constants.COMPUTE_ID],
                       'lb': loadbalancer_id
                   })
-
-
-
-
-
-
-
+        with db_apis.session().begin() as session:
+            self.amphora_repo.update(
+                session,
+                amphora.get(constants.ID),
+                status=constants.AMPHORA_ALLOCATED,
+                compute_id=amphora[constants.COMPUTE_ID],
+                lb_network_ip=amphora[constants.LB_NETWORK_IP],
+                load_balancer_id=loadbalancer_id)

     def revert(self, result, amphora, loadbalancer_id, *args, **kwargs):
         """Mark the amphora as broken and ready to be cleaned up.

@@ -752,9 +783,10 @@ class MarkAmphoraBootingInDB(BaseDatabaseTask):
         LOG.debug("Mark BOOTING in DB for amphora: %(amp)s with "
                   "compute id %(id)s", {'amp': amphora_id,
                                         constants.ID: compute_id})
-
-
-
+        with db_apis.session().begin() as session:
+            self.amphora_repo.update(session, amphora_id,
+                                     status=constants.AMPHORA_BOOTING,
+                                     compute_id=compute_id)

     def revert(self, result, amphora_id, compute_id, *args, **kwargs):
         """Mark the amphora as broken and ready to be cleaned up.

@@ -772,9 +804,10 @@ class MarkAmphoraBootingInDB(BaseDatabaseTask):
                     "id %(amp)s and compute id %(comp)s",
                     {'amp': amphora_id, 'comp': compute_id})
         try:
-
-
-
+            with db_apis.session().begin() as session:
+                self.amphora_repo.update(session, amphora_id,
+                                         status=constants.ERROR,
+                                         compute_id=compute_id)
         except Exception as e:
             LOG.error("Failed to update amphora %(amp)s "
                       "status to ERROR due to: "

@@ -798,9 +831,10 @@ class MarkAmphoraDeletedInDB(BaseDatabaseTask):
                   "compute id %(comp)s",
                   {'amp': amphora[constants.ID],
                    'comp': amphora[constants.COMPUTE_ID]})
-
-
-
+        with db_apis.session().begin() as session:
+            self.amphora_repo.update(session,
+                                     amphora[constants.ID],
+                                     status=constants.DELETED)

     def revert(self, amphora, *args, **kwargs):
         """Mark the amphora as broken and ready to be cleaned up.

@@ -834,9 +868,10 @@ class MarkAmphoraPendingDeleteInDB(BaseDatabaseTask):
                   "with compute id %(id)s",
                   {'amp': amphora[constants.ID],
                    'id': amphora[constants.COMPUTE_ID]})
-
-
-
+        with db_apis.session().begin() as session:
+            self.amphora_repo.update(session,
+                                     amphora[constants.ID],
+                                     status=constants.PENDING_DELETE)

     def revert(self, amphora, *args, **kwargs):
         """Mark the amphora as broken and ready to be cleaned up.

@@ -869,9 +904,10 @@ class MarkAmphoraPendingUpdateInDB(BaseDatabaseTask):
                   "with compute id %(id)s",
                   {'amp': amphora.get(constants.ID),
                    'id': amphora[constants.COMPUTE_ID]})
-
-
-
+        with db_apis.session().begin() as session:
+            self.amphora_repo.update(session,
+                                     amphora.get(constants.ID),
+                                     status=constants.PENDING_UPDATE)

     def revert(self, amphora, *args, **kwargs):
         """Mark the amphora as broken and ready to be cleaned up.
@@ -905,12 +941,13 @@ class MarkAmphoraReadyInDB(BaseDatabaseTask):
                  "id %(comp)s",
                  {"amp": amphora.get(constants.ID),
                   "comp": amphora[constants.COMPUTE_ID]})
-
-
-
-
-
-
+        with db_apis.session().begin() as session:
+            self.amphora_repo.update(
+                session,
+                amphora.get(constants.ID),
+                status=constants.AMPHORA_READY,
+                compute_id=amphora[constants.COMPUTE_ID],
+                lb_network_ip=amphora[constants.LB_NETWORK_IP])

     def revert(self, amphora, *args, **kwargs):
         """Mark the amphora as broken and ready to be cleaned up.

@@ -924,12 +961,13 @@ class MarkAmphoraReadyInDB(BaseDatabaseTask):
                     {'amp': amphora.get(constants.ID),
                      'comp': amphora[constants.COMPUTE_ID]})
         try:
-
-
-
-
-
-
+            with db_apis.session().begin() as session:
+                self.amphora_repo.update(
+                    session,
+                    amphora.get(constants.ID),
+                    status=constants.ERROR,
+                    compute_id=amphora[constants.COMPUTE_ID],
+                    lb_network_ip=amphora[constants.LB_NETWORK_IP])
         except Exception as e:
             LOG.error("Failed to update amphora %(amp)s "
                       "status to ERROR due to: "

@@ -948,8 +986,9 @@ class UpdateAmphoraComputeId(BaseDatabaseTask):
         :returns: None
         """

-
-
+        with db_apis.session().begin() as session:
+            self.amphora_repo.update(session, amphora_id,
+                                     compute_id=compute_id)


 class UpdateAmphoraInfo(BaseDatabaseTask):

@@ -962,14 +1001,15 @@ class UpdateAmphoraInfo(BaseDatabaseTask):
         :param compute_obj: Compute on which an amphora resides
         :returns: Updated amphora object
         """
-
-
-
-
-
-
-
-
+        with db_apis.session().begin() as session:
+            self.amphora_repo.update(
+                session, amphora_id,
+                lb_network_ip=compute_obj[constants.LB_NETWORK_IP],
+                cached_zone=compute_obj[constants.CACHED_ZONE],
+                image_id=compute_obj[constants.IMAGE_ID],
+                compute_flavor=compute_obj[constants.COMPUTE_FLAVOR])
+            return self.amphora_repo.get(session,
+                                         id=amphora_id).to_dict()


 class UpdateAmphoraDBCertExpiration(BaseDatabaseTask):

@@ -990,8 +1030,9 @@ class UpdateAmphoraDBCertExpiration(BaseDatabaseTask):
         cert_expiration = cert_parser.get_cert_expiration(
             fer.decrypt(server_pem.encode("utf-8")))
         LOG.debug("Certificate expiration date is %s ", cert_expiration)
-
-
+        with db_apis.session().begin() as session:
+            self.amphora_repo.update(session, amphora_id,
+                                     cert_expiration=cert_expiration)


 class UpdateAmphoraCertBusyToFalse(BaseDatabaseTask):

@@ -1006,8 +1047,9 @@ class UpdateAmphoraCertBusyToFalse(BaseDatabaseTask):

         LOG.debug("Update cert_busy flag of amphora id %s to False",
                   amphora_id)
-
-
+        with db_apis.session().begin() as session:
+            self.amphora_repo.update(session, amphora_id,
+                                     cert_busy=False)


 class MarkLBActiveInDB(BaseDatabaseTask):
@@ -1033,72 +1075,76 @@ class MarkLBActiveInDB(BaseDatabaseTask):
         if self.mark_subobjects:
             LOG.debug("Marking all listeners of loadbalancer %s ACTIVE",
                       loadbalancer[constants.LOADBALANCER_ID])
-
-
-
-
-
+            with db_apis.session().begin() as session:
+                db_lb = self.loadbalancer_repo.get(
+                    session, id=loadbalancer[constants.LOADBALANCER_ID])
+                for listener in db_lb.listeners:
+                    self._mark_listener_status(session, listener,
+                                               constants.ACTIVE)
+                for pool in db_lb.pools:
+                    self._mark_pool_status(session, pool, constants.ACTIVE)

         LOG.info("Mark ACTIVE in DB for load balancer id: %s",
                  loadbalancer[constants.LOADBALANCER_ID])
-
-
-
+        with db_apis.session().begin() as session:
+            self.loadbalancer_repo.update(
+                session, loadbalancer[constants.LOADBALANCER_ID],
+                provisioning_status=constants.ACTIVE)

-    def _mark_listener_status(self, listener, status):
-        self.listener_repo.update(
+    def _mark_listener_status(self, session, listener, status):
+        self.listener_repo.update(session,
                                   listener.id,
                                   provisioning_status=status)
         LOG.debug("Marking all l7policies of listener %s %s",
                   listener.id, status)
         for l7policy in listener.l7policies:
-            self._mark_l7policy_status(l7policy, status)
+            self._mark_l7policy_status(session, l7policy, status)

         if listener.default_pool:
             LOG.debug("Marking default pool of listener %s %s",
                       listener.id, status)
-            self._mark_pool_status(listener.default_pool, status)
+            self._mark_pool_status(session, listener.default_pool, status)

-    def _mark_l7policy_status(self, l7policy, status):
+    def _mark_l7policy_status(self, session, l7policy, status):
         self.l7policy_repo.update(
-
+            session, l7policy.id,
             provisioning_status=status)

         LOG.debug("Marking all l7rules of l7policy %s %s",
                   l7policy.id, status)
         for l7rule in l7policy.l7rules:
-            self._mark_l7rule_status(l7rule, status)
+            self._mark_l7rule_status(session, l7rule, status)

         if l7policy.redirect_pool:
             LOG.debug("Marking redirect pool of l7policy %s %s",
                       l7policy.id, status)
-            self._mark_pool_status(l7policy.redirect_pool, status)
+            self._mark_pool_status(session, l7policy.redirect_pool, status)

-    def _mark_l7rule_status(self, l7rule, status):
+    def _mark_l7rule_status(self, session, l7rule, status):
         self.l7rule_repo.update(
-
+            session, l7rule.id,
             provisioning_status=status)

-    def _mark_pool_status(self, pool, status):
+    def _mark_pool_status(self, session, pool, status):
         self.pool_repo.update(
-
+            session, pool.id,
             provisioning_status=status)
         if pool.health_monitor:
             LOG.debug("Marking health monitor of pool %s %s", pool.id, status)
-            self._mark_hm_status(pool.health_monitor, status)
+            self._mark_hm_status(session, pool.health_monitor, status)

         LOG.debug("Marking all members of pool %s %s", pool.id, status)
         for member in pool.members:
-            self._mark_member_status(member, status)
+            self._mark_member_status(session, member, status)

-    def _mark_hm_status(self, hm, status):
+    def _mark_hm_status(self, session, hm, status):
         self.health_mon_repo.update(
-
+            session, hm.id,
             provisioning_status=status)

-    def _mark_member_status(self, member, status):
+    def _mark_member_status(self, session, member, status):
         self.member_repo.update(
-
+            session, member.id,
             provisioning_status=status)

     def revert(self, loadbalancer, *args, **kwargs):

@@ -1112,17 +1158,25 @@ class MarkLBActiveInDB(BaseDatabaseTask):
         """

         if self.mark_subobjects:
-            LOG.debug("Marking all listeners of loadbalancer %s
-                      loadbalancer[constants.LOADBALANCER_ID])
-
-
-
-
-
-
-
-
-
+            LOG.debug("Marking all listeners and pools of loadbalancer %s"
+                      " ERROR", loadbalancer[constants.LOADBALANCER_ID])
+            with db_apis.session().begin() as session:
+                db_lb = self.loadbalancer_repo.get(
+                    session,
+                    id=loadbalancer[constants.LOADBALANCER_ID])
+                for listener in db_lb.listeners:
+                    try:
+                        self._mark_listener_status(session, listener,
+                                                   constants.ERROR)
+                    except Exception:
+                        LOG.warning("Error updating listener %s provisioning "
+                                    "status", listener.id)
+                for pool in db_lb.pools:
+                    try:
+                        self._mark_pool_status(session, pool, constants.ERROR)
+                    except Exception:
+                        LOG.warning("Error updating POOL %s provisioning "
+                                    "status", pool.id)


 class MarkLBActiveInDBByListener(BaseDatabaseTask):
@@ -1140,9 +1194,10 @@ class MarkLBActiveInDBByListener(BaseDatabaseTask):

         LOG.info("Mark ACTIVE in DB for load balancer id: %s",
                  listener[constants.LOADBALANCER_ID])
-
-
-
+        with db_apis.session().begin() as session:
+            self.loadbalancer_repo.update(session,
+                                          listener[constants.LOADBALANCER_ID],
+                                          provisioning_status=constants.ACTIVE)


 class UpdateLBServerGroupInDB(BaseDatabaseTask):

@@ -1159,9 +1214,10 @@ class UpdateLBServerGroupInDB(BaseDatabaseTask):

         LOG.debug("Server Group updated with id: %s for load balancer id: %s:",
                   server_group_id, loadbalancer_id)
-
-
-
+        with db_apis.session().begin() as session:
+            self.loadbalancer_repo.update(session,
+                                          id=loadbalancer_id,
+                                          server_group_id=server_group_id)

     def revert(self, loadbalancer_id, server_group_id, *args, **kwargs):
         """Remove server group information from a load balancer in DB.

@@ -1175,9 +1231,10 @@ class UpdateLBServerGroupInDB(BaseDatabaseTask):
                     'load balancer id: %(s2)s ',
                     {'s1': server_group_id, 's2': loadbalancer_id})
         try:
-
-
-
+            with db_apis.session().begin() as session:
+                self.loadbalancer_repo.update(session,
+                                              id=loadbalancer_id,
+                                              server_group_id=None)
         except Exception as e:
             LOG.error("Failed to update load balancer %(lb)s "
                       "server_group_id to None due to: "

@@ -1199,9 +1256,10 @@ class MarkLBDeletedInDB(BaseDatabaseTask):

         LOG.debug("Mark DELETED in DB for load balancer id: %s",
                   loadbalancer[constants.LOADBALANCER_ID])
-
-
-
+        with db_apis.session().begin() as session:
+            self.loadbalancer_repo.update(
+                session, loadbalancer[constants.LOADBALANCER_ID],
+                provisioning_status=constants.DELETED)


 class MarkLBPendingDeleteInDB(BaseDatabaseTask):

@@ -1219,10 +1277,10 @@ class MarkLBPendingDeleteInDB(BaseDatabaseTask):

         LOG.debug("Mark PENDING DELETE in DB for load balancer id: %s",
                   loadbalancer[constants.LOADBALANCER_ID])
-
-
-
-
+        with db_apis.session().begin() as session:
+            self.loadbalancer_repo.update(
+                session, loadbalancer[constants.LOADBALANCER_ID],
+                provisioning_status=constants.PENDING_DELETE)


 class MarkLBAndListenersActiveInDB(BaseDatabaseTask):

@@ -1249,11 +1307,13 @@ class MarkLBAndListenersActiveInDB(BaseDatabaseTask):
                       "and updating status for listener ids: %s", lb_id,
                       ', '.join([listener[constants.LISTENER_ID]
                                  for listener in listeners]))
-
-
+            with db_apis.session().begin() as session:
+                self.loadbalancer_repo.update(
+                    session, lb_id, provisioning_status=constants.ACTIVE)
         for listener in listeners:
-
-
+            with db_apis.session().begin() as session:
+                self.listener_repo.prov_status_active_if_not_error(
+                    session, listener[constants.LISTENER_ID])

     def revert(self, loadbalancer_id, listeners, *args, **kwargs):
         """Mark the load balancer and listeners as broken.

@@ -1287,8 +1347,9 @@ class MarkListenerDeletedInDB(BaseDatabaseTask):
         """

         LOG.debug("Mark DELETED in DB for listener id: %s ", listener.id)
-
-
+        with db_apis.session().begin() as session:
+            self.listener_repo.update(session, listener.id,
+                                      provisioning_status=constants.DELETED)

     def revert(self, listener, *args, **kwargs):
         """Mark the listener ERROR since the delete couldn't happen

@@ -1317,8 +1378,10 @@ class MarkListenerPendingDeleteInDB(BaseDatabaseTask):

         LOG.debug("Mark PENDING DELETE in DB for listener id: %s",
                   listener.id)
-
-
+        with db_apis.session().begin() as session:
+            self.listener_repo.update(
+                session, listener.id,
+                provisioning_status=constants.PENDING_DELETE)

     def revert(self, listener, *args, **kwargs):
         """Mark the listener as broken and ready to be cleaned up.

@@ -1348,14 +1411,15 @@ class UpdateLoadbalancerInDB(BaseDatabaseTask):

         LOG.debug("Update DB for loadbalancer id: %s ",
                   loadbalancer[constants.LOADBALANCER_ID])
-
-
-
-
-
-
-
-
+        with db_apis.session().begin() as session:
+            if update_dict.get('vip'):
+                vip_dict = update_dict.pop('vip')
+                self.vip_repo.update(session,
+                                     loadbalancer[constants.LOADBALANCER_ID],
+                                     **vip_dict)
+            self.loadbalancer_repo.update(
+                session, loadbalancer[constants.LOADBALANCER_ID],
+                **update_dict)


 class UpdateHealthMonInDB(BaseDatabaseTask):
@@ -1374,9 +1438,10 @@ class UpdateHealthMonInDB(BaseDatabaseTask):

         LOG.debug("Update DB for health monitor id: %s ",
                   health_mon[constants.HEALTHMONITOR_ID])
-
-
-
+        with db_apis.session().begin() as session:
+            self.health_mon_repo.update(session,
+                                        health_mon[constants.HEALTHMONITOR_ID],
+                                        **update_dict)

     def revert(self, health_mon, *args, **kwargs):
         """Mark the health monitor ERROR since the update couldn't happen

@@ -1389,10 +1454,11 @@ class UpdateHealthMonInDB(BaseDatabaseTask):
                     "for health monitor id %s",
                     health_mon[constants.HEALTHMONITOR_ID])
         try:
-
-
-
-
+            with db_apis.session().begin() as session:
+                self.health_mon_repo.update(
+                    session,
+                    health_mon[constants.HEALTHMONITOR_ID],
+                    provisioning_status=constants.ERROR)
         except Exception as e:
             LOG.error("Failed to update health monitor %(hm)s "
                       "provisioning_status to ERROR due to: %(except)s",

@@ -1416,9 +1482,10 @@ class UpdateListenerInDB(BaseDatabaseTask):

         LOG.debug("Update DB for listener id: %s ",
                   listener[constants.LISTENER_ID])
-
-
-
+        with db_apis.session().begin() as session:
+            self.listener_repo.update(session,
+                                      listener[constants.LISTENER_ID],
+                                      **update_dict)

     def revert(self, listener, *args, **kwargs):
         """Mark the listener ERROR since the update couldn't happen

@@ -1448,9 +1515,10 @@ class UpdateMemberInDB(BaseDatabaseTask):
         """

         LOG.debug("Update DB for member id: %s ", member[constants.MEMBER_ID])
-
-
-
+        with db_apis.session().begin() as session:
+            self.member_repo.update(session,
+                                    member[constants.MEMBER_ID],
+                                    **update_dict)

     def revert(self, member, *args, **kwargs):
         """Mark the member ERROR since the update couldn't happen

@@ -1462,9 +1530,10 @@ class UpdateMemberInDB(BaseDatabaseTask):
         LOG.warning("Reverting update member in DB "
                     "for member id %s", member[constants.MEMBER_ID])
         try:
-
-
-
+            with db_apis.session().begin() as session:
+                self.member_repo.update(session,
+                                        member[constants.MEMBER_ID],
+                                        provisioning_status=constants.ERROR)
         except Exception as e:
             LOG.error("Failed to update member %(member)s provisioning_status "
                       "to ERROR due to: %(except)s",

@@ -1487,8 +1556,9 @@ class UpdatePoolInDB(BaseDatabaseTask):
         """

         LOG.debug("Update DB for pool id: %s ", pool_id)
-
-
+        with db_apis.session().begin() as session:
+            self.repos.update_pool_and_sp(session, pool_id,
+                                          update_dict)

     def revert(self, pool_id, *args, **kwargs):
         """Mark the pool ERROR since the update couldn't happen

@@ -1499,9 +1569,10 @@ class UpdatePoolInDB(BaseDatabaseTask):

         LOG.warning("Reverting update pool in DB for pool id %s", pool_id)
         try:
-
-
-
+            with db_apis.session().begin() as session:
+                self.repos.update_pool_and_sp(
+                    session, pool_id,
+                    {'provisioning_status': constants.ERROR})
         except Exception as e:
             LOG.error("Failed to update pool %(pool)s provisioning_status to "
                       "ERROR due to: %(except)s", {'pool': pool_id,

@@ -1524,9 +1595,10 @@ class UpdateL7PolicyInDB(BaseDatabaseTask):

         LOG.debug("Update DB for l7policy id: %s",
                   l7policy[constants.L7POLICY_ID])
-
-
-
+        with db_apis.session().begin() as session:
+            self.l7policy_repo.update(session,
+                                      l7policy[constants.L7POLICY_ID],
+                                      **update_dict)

     def revert(self, l7policy, *args, **kwargs):
         """Mark the l7policy ERROR since the update couldn't happen

@@ -1538,9 +1610,10 @@ class UpdateL7PolicyInDB(BaseDatabaseTask):
         LOG.warning("Reverting update l7policy in DB "
                     "for l7policy id %s", l7policy[constants.L7POLICY_ID])
         try:
-
-
-
+            with db_apis.session().begin() as session:
+                self.l7policy_repo.update(session,
+                                          l7policy[constants.L7POLICY_ID],
+                                          provisioning_status=constants.ERROR)
         except Exception as e:
             LOG.error("Failed to update l7policy %(l7p)s provisioning_status "
                       "to ERROR due to: %(except)s",

@@ -1563,9 +1636,10 @@ class UpdateL7RuleInDB(BaseDatabaseTask):
         """

         LOG.debug("Update DB for l7rule id: %s", l7rule[constants.L7RULE_ID])
-
-
-
+        with db_apis.session().begin() as session:
+            self.l7rule_repo.update(session,
+                                    l7rule[constants.L7RULE_ID],
+                                    **update_dict)

     def revert(self, l7rule, *args, **kwargs):
         """Mark the L7 rule ERROR since the update couldn't happen

@@ -1577,9 +1651,10 @@ class UpdateL7RuleInDB(BaseDatabaseTask):
         LOG.warning("Reverting update l7rule in DB "
                     "for l7rule id %s", l7rule[constants.L7RULE_ID])
         try:
-
-
-
+            with db_apis.session().begin() as session:
+                self.l7policy_repo.update(session,
+                                          l7rule[constants.L7POLICY_ID],
+                                          provisioning_status=constants.ERROR)
         except Exception as e:
             LOG.error("Failed to update L7rule %(l7r)s provisioning_status to "
                       "ERROR due to: %(except)s",
@@ -1595,8 +1670,9 @@ class GetAmphoraDetails(BaseDatabaseTask):
         :param amphora: Amphora which network details are required
         :returns: Amphora data dict
         """
-
-
+        with db_apis.session().begin() as session:
+            db_amp = self.amphora_repo.get(session,
+                                           id=amphora.get(constants.ID))
         amphora.update({
             constants.VRRP_IP: db_amp.vrrp_ip,
             constants.HA_IP: db_amp.ha_ip,

@@ -1618,14 +1694,15 @@ class GetAmphoraeFromLoadbalancer(BaseDatabaseTask):
         :returns: A list of Listener objects
         """
         amphorae = []
-
-
-
-
-
-
-
-
+        with db_apis.session().begin() as session:
+            db_lb = self.repos.load_balancer.get(session,
+                                                 id=loadbalancer_id)
+            for amp in db_lb.amphorae:
+                a = self.amphora_repo.get(session, id=amp.id,
+                                          show_deleted=False)
+                if a is None:
+                    continue
+                amphorae.append(a.to_dict())
         return amphorae


@@ -1639,14 +1716,15 @@ class GetListenersFromLoadbalancer(BaseDatabaseTask):
         :returns: A list of Listener objects
         """
         listeners = []
-
-
-
-
-
-
-
-
+        with db_apis.session().begin() as session:
+            db_lb = self.repos.load_balancer.get(
+                session, id=loadbalancer[constants.LOADBALANCER_ID])
+            for listener in db_lb.listeners:
+                db_l = self.listener_repo.get(session, id=listener.id)
+                prov_listener = (
+                    provider_utils.db_listener_to_provider_listener(
+                        db_l))
+                listeners.append(prov_listener.to_dict())
         return listeners


@@ -1659,8 +1737,9 @@ class GetVipFromLoadbalancer(BaseDatabaseTask):
         :param loadbalancer: Load balancer which VIP is required
         :returns: VIP associated with a given load balancer
         """
-
-
+        with db_apis.session().begin() as session:
+            db_lb = self.repos.load_balancer.get(
+                session, id=loadbalancer[constants.LOADBALANCER_ID])
         return db_lb.vip.to_dict(recurse=True)


@@ -1676,10 +1755,12 @@ class GetLoadBalancer(BaseDatabaseTask):

         LOG.debug("Get load balancer from DB for load balancer id: %s",
                   loadbalancer_id)
-
-
-
-
+        with db_apis.session().begin() as session:
+            db_lb = self.loadbalancer_repo.get(session,
+                                               id=loadbalancer_id)
+            provider_lb = (
+                provider_utils.db_loadbalancer_to_provider_loadbalancer(
+                    db_lb))
         return provider_lb.to_dict()


@@ -1693,13 +1774,15 @@ class CreateVRRPGroupForLB(BaseDatabaseTask):
                                 should be created
         """
         try:
-
-
-
-
-
-
-
+            with db_apis.session().begin() as session:
+                self.repos.vrrpgroup.create(
+                    session,
+                    load_balancer_id=loadbalancer_id,
+                    vrrp_group_name=str(loadbalancer_id).replace('-', ''),
+                    vrrp_auth_type=constants.VRRP_AUTH_DEFAULT,
+                    vrrp_auth_pass=(
+                        uuidutils.generate_uuid().replace('-', '')[0:7]),
+                    advert_int=CONF.keepalived_vrrp.vrrp_advert_int)
         except odb_exceptions.DBDuplicateEntry:
             LOG.debug('VRRP_GROUP entry already exists for load balancer, '
                       'skipping create.')

@@ -1718,7 +1801,8 @@ class DisableAmphoraHealthMonitoring(BaseDatabaseTask):
         :param amphora: The amphora to disable health monitoring for
         :returns: None
         """
-
+        with db_apis.session().begin() as session:
+            self._delete_from_amp_health(session, amphora[constants.ID])


 class DisableLBAmphoraeHealthMonitoring(BaseDatabaseTask):

@@ -1734,10 +1818,11 @@ class DisableLBAmphoraeHealthMonitoring(BaseDatabaseTask):
         :param loadbalancer: The load balancer to disable health monitoring on
         :returns: None
         """
-
-
-
-
+        with db_apis.session().begin() as session:
+            db_lb = self.loadbalancer_repo.get(
+                session, id=loadbalancer[constants.LOADBALANCER_ID])
+            for amphora in db_lb.amphorae:
+                self._delete_from_amp_health(session, amphora.id)


 class MarkAmphoraHealthBusy(BaseDatabaseTask):

@@ -1753,7 +1838,8 @@ class MarkAmphoraHealthBusy(BaseDatabaseTask):
         :param amphora: The amphora to mark amphora health busy
         :returns: None
         """
-
+        with db_apis.session().begin() as session:
+            self._mark_amp_health_busy(session, amphora[constants.ID])


 class MarkLBAmphoraeHealthBusy(BaseDatabaseTask):

@@ -1769,10 +1855,11 @@ class MarkLBAmphoraeHealthBusy(BaseDatabaseTask):
         :param loadbalancer: The load balancer to mark amphorae health busy
         :returns: None
         """
-
-
-
-
+        with db_apis.session().begin() as session:
+            db_lb = self.loadbalancer_repo.get(
+                session, id=loadbalancer[constants.LOADBALANCER_ID])
+            for amphora in db_lb.amphorae:
+                self._mark_amp_health_busy(session, amphora.id)


 class MarkHealthMonitorActiveInDB(BaseDatabaseTask):
@@ -1790,14 +1877,15 @@ class MarkHealthMonitorActiveInDB(BaseDatabaseTask):
|
|
1790
1877
|
|
1791
1878
|
LOG.debug("Mark ACTIVE in DB for health monitor id: %s",
|
1792
1879
|
health_mon[constants.HEALTHMONITOR_ID])
|
1793
|
-
|
1794
|
-
|
1795
|
-
|
1796
|
-
|
1797
|
-
|
1798
|
-
|
1799
|
-
|
1800
|
-
|
1880
|
+
with db_apis.session().begin() as session:
|
1881
|
+
db_health_mon = self.health_mon_repo.get(
|
1882
|
+
session, id=health_mon[constants.HEALTHMONITOR_ID])
|
1883
|
+
op_status = (constants.ONLINE if db_health_mon.enabled
|
1884
|
+
else constants.OFFLINE)
|
1885
|
+
self.health_mon_repo.update(session,
|
1886
|
+
health_mon[constants.HEALTHMONITOR_ID],
|
1887
|
+
provisioning_status=constants.ACTIVE,
|
1888
|
+
operating_status=op_status)
|
1801
1889
|
|
1802
1890
|
def revert(self, health_mon, *args, **kwargs):
|
1803
1891
|
"""Mark the health monitor as broken
|
@@ -1828,10 +1916,11 @@ class MarkHealthMonitorPendingCreateInDB(BaseDatabaseTask):
|
|
1828
1916
|
|
1829
1917
|
LOG.debug("Mark PENDING CREATE in DB for health monitor id: %s",
|
1830
1918
|
health_mon[constants.HEALTHMONITOR_ID])
|
1831
|
-
|
1832
|
-
|
1833
|
-
|
1834
|
-
|
1919
|
+
with db_apis.session().begin() as session:
|
1920
|
+
self.health_mon_repo.update(session,
|
1921
|
+
health_mon[constants.HEALTHMONITOR_ID],
|
1922
|
+
provisioning_status=(constants.
|
1923
|
+
PENDING_CREATE))
|
1835
1924
|
|
1836
1925
|
def revert(self, health_mon, *args, **kwargs):
|
1837
1926
|
"""Mark the health monitor as broken
|
@@ -1862,10 +1951,11 @@ class MarkHealthMonitorPendingDeleteInDB(BaseDatabaseTask):
|
|
1862
1951
|
|
1863
1952
|
LOG.debug("Mark PENDING DELETE in DB for health monitor id: %s",
|
1864
1953
|
health_mon[constants.HEALTHMONITOR_ID])
|
1865
|
-
|
1866
|
-
|
1867
|
-
|
1868
|
-
|
1954
|
+
with db_apis.session().begin() as session:
|
1955
|
+
self.health_mon_repo.update(session,
|
1956
|
+
health_mon[constants.HEALTHMONITOR_ID],
|
1957
|
+
provisioning_status=(constants.
|
1958
|
+
PENDING_DELETE))
|
1869
1959
|
|
1870
1960
|
def revert(self, health_mon, *args, **kwargs):
|
1871
1961
|
"""Mark the health monitor as broken
|
@@ -1896,10 +1986,11 @@ class MarkHealthMonitorPendingUpdateInDB(BaseDatabaseTask):
|
|
1896
1986
|
|
1897
1987
|
LOG.debug("Mark PENDING UPDATE in DB for health monitor id: %s",
|
1898
1988
|
health_mon[constants.HEALTHMONITOR_ID])
|
1899
|
-
|
1900
|
-
|
1901
|
-
|
1902
|
-
|
1989
|
+
with db_apis.session().begin() as session:
|
1990
|
+
self.health_mon_repo.update(session,
|
1991
|
+
health_mon[constants.HEALTHMONITOR_ID],
|
1992
|
+
provisioning_status=(constants.
|
1993
|
+
PENDING_UPDATE))
|
1903
1994
|
|
1904
1995
|
def revert(self, health_mon, *args, **kwargs):
|
1905
1996
|
"""Mark the health monitor as broken
|
@@ -1915,6 +2006,41 @@ class MarkHealthMonitorPendingUpdateInDB(BaseDatabaseTask):
|
|
1915
2006
|
health_mon[constants.HEALTHMONITOR_ID])
|
1916
2007
|
|
1917
2008
|
|
2009
|
+
class MarkHealthMonitorsOnlineInDB(BaseDatabaseTask):
|
2010
|
+
def execute(self, loadbalancer: dict):
|
2011
|
+
"""Mark all enabled health monitors Online
|
2012
|
+
|
2013
|
+
:param loadbalancer: Dictionary of a Load Balancer that has associated
|
2014
|
+
health monitors
|
2015
|
+
:returns: None
|
2016
|
+
"""
|
2017
|
+
|
2018
|
+
with db_apis.session().begin() as session:
|
2019
|
+
db_lb = self.loadbalancer_repo.get(
|
2020
|
+
session, id=loadbalancer[constants.LOADBALANCER_ID])
|
2021
|
+
|
2022
|
+
# Update the healthmonitors of either attached listeners or
|
2023
|
+
# l7policies
|
2024
|
+
hms_to_update = []
|
2025
|
+
|
2026
|
+
for listener in db_lb.listeners:
|
2027
|
+
if (listener.default_pool and
|
2028
|
+
listener.default_pool.health_monitor):
|
2029
|
+
hm = listener.default_pool.health_monitor
|
2030
|
+
if hm.enabled:
|
2031
|
+
hms_to_update.append(hm.id)
|
2032
|
+
for l7policy in listener.l7policies:
|
2033
|
+
if l7policy.redirect_pool and (
|
2034
|
+
l7policy.redirect_pool.health_monitor):
|
2035
|
+
hm = l7policy.redirect_pool.health_monitor
|
2036
|
+
if hm.enabled:
|
2037
|
+
hms_to_update.append(hm.id)
|
2038
|
+
|
2039
|
+
for hm_id in hms_to_update:
|
2040
|
+
self.health_mon_repo.update(
|
2041
|
+
session, hm_id, operating_status=constants.ONLINE)
|
2042
|
+
|
2043
|
+
|
1918
2044
|
class MarkL7PolicyActiveInDB(BaseDatabaseTask):
|
1919
2045
|
"""Mark the l7policy ACTIVE in the DB.
|
1920
2046
|
|
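MarkHealthMonitorsOnlineInDB is a new task rather than a change to an existing one. Like the other classes in this file it is a TaskFlow task, so a flow supplies its loadbalancer argument from the engine's store by parameter name. A hedged sketch of that wiring with a stub task; the stub class, flow name, and store contents are invented for illustration, and the real task does the database work shown in the hunk above instead of printing:

    import taskflow.engines
    from taskflow import task
    from taskflow.patterns import linear_flow

    class StubMarkHealthMonitorsOnline(task.Task):
        def execute(self, loadbalancer: dict):
            # The real task opens a DB session here and flips every enabled
            # health monitor on the load balancer's pools to ONLINE.
            print("would mark HMs online for", loadbalancer['loadbalancer_id'])

    flow = linear_flow.Flow('post-failover-status-flow')
    flow.add(StubMarkHealthMonitorsOnline())
    taskflow.engines.run(flow, store={'loadbalancer': {'loadbalancer_id': 'lb-1'}})
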
@@ -1930,14 +2056,15 @@ class MarkL7PolicyActiveInDB(BaseDatabaseTask):

         LOG.debug("Mark ACTIVE in DB for l7policy id: %s",
                   l7policy[constants.L7POLICY_ID])
-
-
-
-
-
-
-
-
+        with db_apis.session().begin() as session:
+            db_l7policy = self.l7policy_repo.get(
+                session, id=l7policy[constants.L7POLICY_ID])
+            op_status = (constants.ONLINE if db_l7policy.enabled
+                         else constants.OFFLINE)
+            self.l7policy_repo.update(session,
+                                      l7policy[constants.L7POLICY_ID],
+                                      provisioning_status=constants.ACTIVE,
+                                      operating_status=op_status)

     def revert(self, l7policy, *args, **kwargs):
         """Mark the l7policy as broken
@@ -1967,9 +2094,10 @@ class MarkL7PolicyPendingCreateInDB(BaseDatabaseTask):

         LOG.debug("Mark PENDING CREATE in DB for l7policy id: %s",
                   l7policy[constants.L7POLICY_ID])
-
-
-
+        with db_apis.session().begin() as session:
+            self.l7policy_repo.update(
+                session, l7policy[constants.L7POLICY_ID],
+                provisioning_status=constants.PENDING_CREATE)

     def revert(self, l7policy, *args, **kwargs):
         """Mark the l7policy as broken
@@ -1999,9 +2127,10 @@ class MarkL7PolicyPendingDeleteInDB(BaseDatabaseTask):

         LOG.debug("Mark PENDING DELETE in DB for l7policy id: %s",
                   l7policy[constants.L7POLICY_ID])
-
-
-
+        with db_apis.session().begin() as session:
+            self.l7policy_repo.update(
+                session, l7policy[constants.L7POLICY_ID],
+                provisioning_status=constants.PENDING_DELETE)

     def revert(self, l7policy, *args, **kwargs):
         """Mark the l7policy as broken
@@ -2031,10 +2160,11 @@ class MarkL7PolicyPendingUpdateInDB(BaseDatabaseTask):

         LOG.debug("Mark PENDING UPDATE in DB for l7policy id: %s",
                   l7policy[constants.L7POLICY_ID])
-
-
-
-
+        with db_apis.session().begin() as session:
+            self.l7policy_repo.update(session,
+                                      l7policy[constants.L7POLICY_ID],
+                                      provisioning_status=(constants.
+                                                           PENDING_UPDATE))

     def revert(self, l7policy, *args, **kwargs):
         """Mark the l7policy as broken
@@ -2064,14 +2194,15 @@ class MarkL7RuleActiveInDB(BaseDatabaseTask):

         LOG.debug("Mark ACTIVE in DB for l7rule id: %s",
                   l7rule[constants.L7RULE_ID])
-
-
-
-
-
-
-
-
+        with db_apis.session().begin() as session:
+            db_rule = self.l7rule_repo.get(session,
+                                           id=l7rule[constants.L7RULE_ID])
+            op_status = (constants.ONLINE if db_rule.enabled
+                         else constants.OFFLINE)
+            self.l7rule_repo.update(session,
+                                    l7rule[constants.L7RULE_ID],
+                                    provisioning_status=constants.ACTIVE,
+                                    operating_status=op_status)

     def revert(self, l7rule, *args, **kwargs):
         """Mark the l7rule as broken
@@ -2101,9 +2232,10 @@ class MarkL7RulePendingCreateInDB(BaseDatabaseTask):

         LOG.debug("Mark PENDING CREATE in DB for l7rule id: %s",
                   l7rule[constants.L7RULE_ID])
-
-
-
+        with db_apis.session().begin() as session:
+            self.l7rule_repo.update(
+                session, l7rule[constants.L7RULE_ID],
+                provisioning_status=constants.PENDING_CREATE)

     def revert(self, l7rule, *args, **kwargs):
         """Mark the l7rule as broken
@@ -2133,9 +2265,10 @@ class MarkL7RulePendingDeleteInDB(BaseDatabaseTask):

         LOG.debug("Mark PENDING DELETE in DB for l7rule id: %s",
                   l7rule[constants.L7RULE_ID])
-
-
-
+        with db_apis.session().begin() as session:
+            self.l7rule_repo.update(
+                session, l7rule[constants.L7RULE_ID],
+                provisioning_status=constants.PENDING_DELETE)

     def revert(self, l7rule, *args, **kwargs):
         """Mark the l7rule as broken
@@ -2165,9 +2298,10 @@ class MarkL7RulePendingUpdateInDB(BaseDatabaseTask):

         LOG.debug("Mark PENDING UPDATE in DB for l7rule id: %s",
                   l7rule[constants.L7RULE_ID])
-
-
-
+        with db_apis.session().begin() as session:
+            self.l7rule_repo.update(
+                session, l7rule[constants.L7RULE_ID],
+                provisioning_status=constants.PENDING_UPDATE)

     def revert(self, l7rule, *args, **kwargs):
         """Mark the l7rule as broken
@@ -2197,9 +2331,10 @@ class MarkMemberActiveInDB(BaseDatabaseTask):

         LOG.debug("Mark ACTIVE in DB for member id: %s",
                   member[constants.MEMBER_ID])
-
-
-
+        with db_apis.session().begin() as session:
+            self.member_repo.update(session,
+                                    member[constants.MEMBER_ID],
+                                    provisioning_status=constants.ACTIVE)

     def revert(self, member, *args, **kwargs):
         """Mark the member as broken
@@ -2229,9 +2364,10 @@ class MarkMemberPendingCreateInDB(BaseDatabaseTask):

         LOG.debug("Mark PENDING CREATE in DB for member id: %s",
                   member[constants.MEMBER_ID])
-
-
-
+        with db_apis.session().begin() as session:
+            self.member_repo.update(
+                session, member[constants.MEMBER_ID],
+                provisioning_status=constants.PENDING_CREATE)

     def revert(self, member, *args, **kwargs):
         """Mark the member as broken
@@ -2261,9 +2397,10 @@ class MarkMemberPendingDeleteInDB(BaseDatabaseTask):

         LOG.debug("Mark PENDING DELETE in DB for member id: %s",
                   member[constants.MEMBER_ID])
-
-
-
+        with db_apis.session().begin() as session:
+            self.member_repo.update(
+                session, member[constants.MEMBER_ID],
+                provisioning_status=constants.PENDING_DELETE)

     def revert(self, member, *args, **kwargs):
         """Mark the member as broken
@@ -2293,9 +2430,10 @@ class MarkMemberPendingUpdateInDB(BaseDatabaseTask):

         LOG.debug("Mark PENDING UPDATE in DB for member id: %s",
                   member[constants.MEMBER_ID])
-
-
-
+        with db_apis.session().begin() as session:
+            self.member_repo.update(
+                session, member[constants.MEMBER_ID],
+                provisioning_status=constants.PENDING_UPDATE)

     def revert(self, member, *args, **kwargs):
         """Mark the member as broken
@@ -2325,9 +2463,10 @@ class MarkPoolActiveInDB(BaseDatabaseTask):

         LOG.debug("Mark ACTIVE in DB for pool id: %s",
                   pool_id)
-
-
-
+        with db_apis.session().begin() as session:
+            self.pool_repo.update(session,
+                                  pool_id,
+                                  provisioning_status=constants.ACTIVE)

     def revert(self, pool_id, *args, **kwargs):
         """Mark the pool as broken
@@ -2356,9 +2495,10 @@ class MarkPoolPendingCreateInDB(BaseDatabaseTask):

         LOG.debug("Mark PENDING CREATE in DB for pool id: %s",
                   pool_id)
-
-
-
+        with db_apis.session().begin() as session:
+            self.pool_repo.update(session,
+                                  pool_id,
+                                  provisioning_status=constants.PENDING_CREATE)

     def revert(self, pool_id, *args, **kwargs):
         """Mark the pool as broken
@@ -2387,9 +2527,10 @@ class MarkPoolPendingDeleteInDB(BaseDatabaseTask):

         LOG.debug("Mark PENDING DELETE in DB for pool id: %s",
                   pool_id)
-
-
-
+        with db_apis.session().begin() as session:
+            self.pool_repo.update(session,
+                                  pool_id,
+                                  provisioning_status=constants.PENDING_DELETE)

     def revert(self, pool_id, *args, **kwargs):
         """Mark the pool as broken
@@ -2418,9 +2559,10 @@ class MarkPoolPendingUpdateInDB(BaseDatabaseTask):

         LOG.debug("Mark PENDING UPDATE in DB for pool id: %s",
                   pool_id)
-
-
-
+        with db_apis.session().begin() as session:
+            self.pool_repo.update(session,
+                                  pool_id,
+                                  provisioning_status=constants.PENDING_UPDATE)

     def revert(self, pool_id, *args, **kwargs):
         """Mark the pool as broken
@@ -2450,7 +2592,7 @@ class DecrementHealthMonitorQuota(BaseDatabaseTask):
         LOG.debug("Decrementing health monitor quota for "
                   "project: %s ", project_id)

-        lock_session = db_apis.get_session(
+        lock_session = db_apis.get_session()
         try:
             self.repos.decrement_quota(lock_session,
                                        data_models.HealthMonitor,
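The quota tasks keep an explicitly managed lock_session instead of the context-manager style; the change in this and the following hunks is that db_apis.get_session() is now called with no arguments. Presumably (the surrounding code is not shown in full in these truncated hunks) the enclosing try: blocks commit the lock session on success and roll it back on error. A generic, hedged sketch of that manual pattern, with a placeholder standing in for self.repos.decrement_quota:

    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    Session = sessionmaker(create_engine('sqlite://'))

    def decrement_quota(session, project_id):
        """Placeholder for the repository call made with the lock session."""

    lock_session = Session()      # analogous to db_apis.get_session()
    try:
        decrement_quota(lock_session, 'project-1')
        lock_session.commit()     # release the quota row lock
    except Exception:
        lock_session.rollback()   # undo the partial quota change
        raise
    finally:
        lock_session.close()
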
@@ -2479,7 +2621,7 @@ class DecrementHealthMonitorQuota(BaseDatabaseTask):

             try:
                 session = db_apis.get_session()
-                lock_session = db_apis.get_session(
+                lock_session = db_apis.get_session()
                 try:
                     self.repos.check_quota_met(session,
                                                lock_session,
@@ -2509,7 +2651,7 @@ class DecrementListenerQuota(BaseDatabaseTask):
         LOG.debug("Decrementing listener quota for "
                   "project: %s ", project_id)

-        lock_session = db_apis.get_session(
+        lock_session = db_apis.get_session()
         try:
             self.repos.decrement_quota(lock_session,
                                        data_models.Listener,
@@ -2537,7 +2679,7 @@ class DecrementListenerQuota(BaseDatabaseTask):

             try:
                 session = db_apis.get_session()
-                lock_session = db_apis.get_session(
+                lock_session = db_apis.get_session()
                 try:
                     self.repos.check_quota_met(session,
                                                lock_session,
@@ -2567,7 +2709,7 @@ class DecrementLoadBalancerQuota(BaseDatabaseTask):
         LOG.debug("Decrementing load balancer quota for "
                   "project: %s ", project_id)

-        lock_session = db_apis.get_session(
+        lock_session = db_apis.get_session()
         try:
             self.repos.decrement_quota(lock_session,
                                        data_models.LoadBalancer,
@@ -2597,7 +2739,7 @@ class DecrementLoadBalancerQuota(BaseDatabaseTask):

             try:
                 session = db_apis.get_session()
-                lock_session = db_apis.get_session(
+                lock_session = db_apis.get_session()
                 try:
                     self.repos.check_quota_met(session,
                                                lock_session,
@@ -2627,7 +2769,7 @@ class DecrementMemberQuota(BaseDatabaseTask):
         LOG.debug("Decrementing member quota for "
                   "project: %s ", project_id)

-        lock_session = db_apis.get_session(
+        lock_session = db_apis.get_session()
         try:
             self.repos.decrement_quota(lock_session,
                                        data_models.Member,
@@ -2656,7 +2798,7 @@ class DecrementMemberQuota(BaseDatabaseTask):

             try:
                 session = db_apis.get_session()
-                lock_session = db_apis.get_session(
+                lock_session = db_apis.get_session()
                 try:
                     self.repos.check_quota_met(session,
                                                lock_session,
@@ -2686,7 +2828,7 @@ class DecrementPoolQuota(BaseDatabaseTask):
         LOG.debug("Decrementing pool quota for "
                   "project: %s ", project_id)

-        lock_session = db_apis.get_session(
+        lock_session = db_apis.get_session()
         try:
             self.repos.decrement_quota(lock_session,
                                        data_models.Pool,
@@ -2729,7 +2871,7 @@ class DecrementPoolQuota(BaseDatabaseTask):
             # in case other quota actions have occurred
             try:
                 session = db_apis.get_session()
-                lock_session = db_apis.get_session(
+                lock_session = db_apis.get_session()
                 try:
                     self.repos.check_quota_met(session,
                                                lock_session,
@@ -2741,7 +2883,7 @@ class DecrementPoolQuota(BaseDatabaseTask):

                 # Attempt to increment back the health monitor quota
                 if pool_child_count['HM'] > 0:
-                    lock_session = db_apis.get_session(
+                    lock_session = db_apis.get_session()
                     try:
                         self.repos.check_quota_met(session,
                                                    lock_session,
@@ -2756,7 +2898,7 @@ class DecrementPoolQuota(BaseDatabaseTask):
                 # should other factors have increased the in use quota
                 # before this point in the revert flow
                 for i in range(pool_child_count['member']):
-                    lock_session = db_apis.get_session(
+                    lock_session = db_apis.get_session()
                     try:
                         self.repos.check_quota_met(session,
                                                    lock_session,
@@ -2785,9 +2927,9 @@ class CountPoolChildrenForQuota(BaseDatabaseTask):
         :param pool_id: pool_id of pool object to count children on
         :returns: None
         """
-
-
-
+        with db_apis.session().begin() as session:
+            hm_count, member_count = (
+                self.pool_repo.get_children_count(session, pool_id))

         return {'HM': hm_count, 'member': member_count}

@@ -2806,7 +2948,7 @@ class DecrementL7policyQuota(BaseDatabaseTask):
         """
         LOG.debug("Decrementing l7policy quota for "
                   "project: %s ", l7policy[constants.PROJECT_ID])
-        lock_session = db_apis.get_session(
+        lock_session = db_apis.get_session()
         try:
             self.repos.decrement_quota(lock_session,
                                        data_models.L7Policy,
@@ -2840,7 +2982,7 @@ class DecrementL7policyQuota(BaseDatabaseTask):
         if not isinstance(result, failure.Failure):
             try:
                 session = db_apis.get_session()
-                lock_session = db_apis.get_session(
+                lock_session = db_apis.get_session()
                 try:
                     self.repos.check_quota_met(session,
                                                lock_session,
@@ -2854,7 +2996,7 @@ class DecrementL7policyQuota(BaseDatabaseTask):
                 if db_l7policy:
                     # Attempt to increment back the L7Rule quota
                     for i in range(len(db_l7policy.l7rules)):
-                        lock_session = db_apis.get_session(
+                        lock_session = db_apis.get_session()
                         try:
                             self.repos.check_quota_met(
                                 session, lock_session, data_models.L7Rule,
@@ -2883,7 +3025,7 @@ class DecrementL7ruleQuota(BaseDatabaseTask):
         LOG.debug("Decrementing l7rule quota for "
                   "project: %s ", l7rule[constants.PROJECT_ID])

-        lock_session = db_apis.get_session(
+        lock_session = db_apis.get_session()
         try:
             self.repos.decrement_quota(lock_session,
                                        data_models.L7Rule,
@@ -2912,7 +3054,7 @@ class DecrementL7ruleQuota(BaseDatabaseTask):

             try:
                 session = db_apis.get_session()
-                lock_session = db_apis.get_session(
+                lock_session = db_apis.get_session()
                 try:
                     self.repos.check_quota_met(session,
                                                lock_session,
@@ -2943,6 +3085,6 @@ class UpdatePoolMembersOperatingStatusInDB(BaseDatabaseTask):
         LOG.debug("Updating member operating status to %(status)s in DB for "
                   "pool id: %(pool)s", {'status': operating_status,
                                         'pool': pool_id})
-
-
-
+        with db_apis.session().begin() as session:
+            self.member_repo.update_pool_members(
+                session, pool_id, operating_status=operating_status)