octavia 12.0.0.0rc2__py3-none-any.whl → 13.0.0.0rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- octavia/amphorae/backends/agent/api_server/osutils.py +1 -0
- octavia/amphorae/backends/agent/api_server/plug.py +21 -7
- octavia/amphorae/backends/agent/api_server/templates/amphora-netns.systemd.j2 +2 -2
- octavia/amphorae/backends/agent/api_server/util.py +21 -0
- octavia/amphorae/backends/health_daemon/health_daemon.py +9 -3
- octavia/amphorae/backends/health_daemon/health_sender.py +2 -0
- octavia/amphorae/backends/utils/interface.py +14 -6
- octavia/amphorae/backends/utils/interface_file.py +6 -3
- octavia/amphorae/backends/utils/keepalivedlvs_query.py +8 -9
- octavia/amphorae/drivers/driver_base.py +1 -2
- octavia/amphorae/drivers/haproxy/rest_api_driver.py +11 -25
- octavia/amphorae/drivers/health/heartbeat_udp.py +34 -24
- octavia/amphorae/drivers/keepalived/jinja/jinja_cfg.py +3 -12
- octavia/amphorae/drivers/noop_driver/driver.py +3 -5
- octavia/api/common/pagination.py +4 -4
- octavia/api/drivers/amphora_driver/v2/driver.py +11 -5
- octavia/api/drivers/driver_agent/driver_get.py +22 -14
- octavia/api/drivers/driver_agent/driver_updater.py +8 -4
- octavia/api/drivers/utils.py +4 -2
- octavia/api/healthcheck/healthcheck_plugins.py +4 -2
- octavia/api/root_controller.py +4 -1
- octavia/api/v2/controllers/amphora.py +35 -38
- octavia/api/v2/controllers/availability_zone_profiles.py +43 -33
- octavia/api/v2/controllers/availability_zones.py +22 -18
- octavia/api/v2/controllers/flavor_profiles.py +37 -28
- octavia/api/v2/controllers/flavors.py +19 -15
- octavia/api/v2/controllers/health_monitor.py +44 -33
- octavia/api/v2/controllers/l7policy.py +52 -40
- octavia/api/v2/controllers/l7rule.py +68 -55
- octavia/api/v2/controllers/listener.py +88 -61
- octavia/api/v2/controllers/load_balancer.py +52 -34
- octavia/api/v2/controllers/member.py +63 -52
- octavia/api/v2/controllers/pool.py +55 -42
- octavia/api/v2/controllers/quotas.py +5 -3
- octavia/api/v2/types/listener.py +15 -0
- octavia/cmd/octavia_worker.py +0 -3
- octavia/cmd/status.py +1 -4
- octavia/common/clients.py +25 -45
- octavia/common/config.py +64 -22
- octavia/common/constants.py +3 -2
- octavia/common/data_models.py +7 -1
- octavia/common/jinja/haproxy/combined_listeners/jinja_cfg.py +12 -1
- octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 +5 -2
- octavia/common/jinja/lvs/jinja_cfg.py +4 -2
- octavia/common/keystone.py +58 -5
- octavia/common/validate.py +35 -0
- octavia/compute/drivers/noop_driver/driver.py +6 -0
- octavia/controller/healthmanager/health_manager.py +3 -6
- octavia/controller/housekeeping/house_keeping.py +36 -37
- octavia/controller/worker/amphora_rate_limit.py +5 -4
- octavia/controller/worker/task_utils.py +57 -41
- octavia/controller/worker/v2/controller_worker.py +160 -103
- octavia/controller/worker/v2/flows/listener_flows.py +3 -0
- octavia/controller/worker/v2/flows/load_balancer_flows.py +9 -14
- octavia/controller/worker/v2/tasks/amphora_driver_tasks.py +152 -91
- octavia/controller/worker/v2/tasks/compute_tasks.py +4 -2
- octavia/controller/worker/v2/tasks/database_tasks.py +542 -400
- octavia/controller/worker/v2/tasks/network_tasks.py +119 -79
- octavia/db/api.py +26 -23
- octavia/db/base_models.py +2 -2
- octavia/db/healthcheck.py +2 -1
- octavia/db/migration/alembic_migrations/versions/632152d2d32e_add_http_strict_transport_security_.py +42 -0
- octavia/db/models.py +12 -2
- octavia/db/prepare.py +2 -0
- octavia/db/repositories.py +462 -482
- octavia/hacking/checks.py +1 -1
- octavia/network/base.py +0 -14
- octavia/network/drivers/neutron/allowed_address_pairs.py +92 -135
- octavia/network/drivers/neutron/base.py +65 -77
- octavia/network/drivers/neutron/utils.py +69 -85
- octavia/network/drivers/noop_driver/driver.py +0 -7
- octavia/statistics/drivers/update_db.py +10 -10
- octavia/tests/common/constants.py +91 -84
- octavia/tests/common/sample_data_models.py +13 -1
- octavia/tests/fixtures.py +32 -0
- octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py +9 -10
- octavia/tests/functional/api/drivers/driver_agent/test_driver_agent.py +260 -15
- octavia/tests/functional/api/test_root_controller.py +3 -28
- octavia/tests/functional/api/v2/base.py +5 -3
- octavia/tests/functional/api/v2/test_amphora.py +18 -5
- octavia/tests/functional/api/v2/test_availability_zone_profiles.py +1 -0
- octavia/tests/functional/api/v2/test_listener.py +51 -19
- octavia/tests/functional/api/v2/test_load_balancer.py +10 -1
- octavia/tests/functional/db/base.py +31 -16
- octavia/tests/functional/db/test_models.py +27 -28
- octavia/tests/functional/db/test_repositories.py +407 -50
- octavia/tests/unit/amphorae/backends/agent/api_server/test_amphora_info.py +2 -0
- octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py +1 -1
- octavia/tests/unit/amphorae/backends/agent/api_server/test_plug.py +54 -6
- octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py +35 -0
- octavia/tests/unit/amphorae/backends/health_daemon/test_health_daemon.py +8 -0
- octavia/tests/unit/amphorae/backends/health_daemon/test_health_sender.py +18 -0
- octavia/tests/unit/amphorae/backends/utils/test_interface.py +81 -0
- octavia/tests/unit/amphorae/backends/utils/test_interface_file.py +2 -0
- octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py +129 -5
- octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py +42 -20
- octavia/tests/unit/amphorae/drivers/health/test_heartbeat_udp.py +18 -20
- octavia/tests/unit/amphorae/drivers/keepalived/jinja/test_jinja_cfg.py +4 -4
- octavia/tests/unit/amphorae/drivers/noop_driver/test_driver.py +4 -1
- octavia/tests/unit/api/drivers/driver_agent/test_driver_get.py +3 -3
- octavia/tests/unit/api/drivers/driver_agent/test_driver_updater.py +11 -13
- octavia/tests/unit/base.py +6 -0
- octavia/tests/unit/cmd/test_interface.py +2 -2
- octavia/tests/unit/cmd/test_status.py +2 -2
- octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py +152 -1
- octavia/tests/unit/common/sample_configs/sample_configs_combined.py +10 -3
- octavia/tests/unit/common/test_clients.py +0 -39
- octavia/tests/unit/common/test_keystone.py +54 -0
- octavia/tests/unit/common/test_validate.py +67 -0
- octavia/tests/unit/controller/healthmanager/test_health_manager.py +8 -22
- octavia/tests/unit/controller/housekeeping/test_house_keeping.py +3 -64
- octavia/tests/unit/controller/worker/test_amphora_rate_limit.py +1 -1
- octavia/tests/unit/controller/worker/test_task_utils.py +44 -24
- octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py +0 -1
- octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py +49 -26
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py +399 -196
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py +37 -64
- octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py +3 -14
- octavia/tests/unit/controller/worker/v2/test_controller_worker.py +2 -2
- octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py +456 -561
- octavia/tests/unit/network/drivers/neutron/test_base.py +181 -194
- octavia/tests/unit/network/drivers/neutron/test_utils.py +14 -30
- octavia/tests/unit/statistics/drivers/test_update_db.py +7 -5
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/README.rst +1 -1
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/AUTHORS +4 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/METADATA +4 -4
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/RECORD +141 -189
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/entry_points.txt +1 -2
- octavia-13.0.0.0rc1.dist-info/pbr.json +1 -0
- octavia/api/drivers/amphora_driver/v1/__init__.py +0 -11
- octavia/api/drivers/amphora_driver/v1/driver.py +0 -547
- octavia/controller/queue/v1/__init__.py +0 -11
- octavia/controller/queue/v1/consumer.py +0 -64
- octavia/controller/queue/v1/endpoints.py +0 -160
- octavia/controller/worker/v1/__init__.py +0 -11
- octavia/controller/worker/v1/controller_worker.py +0 -1157
- octavia/controller/worker/v1/flows/__init__.py +0 -11
- octavia/controller/worker/v1/flows/amphora_flows.py +0 -610
- octavia/controller/worker/v1/flows/health_monitor_flows.py +0 -105
- octavia/controller/worker/v1/flows/l7policy_flows.py +0 -94
- octavia/controller/worker/v1/flows/l7rule_flows.py +0 -100
- octavia/controller/worker/v1/flows/listener_flows.py +0 -128
- octavia/controller/worker/v1/flows/load_balancer_flows.py +0 -692
- octavia/controller/worker/v1/flows/member_flows.py +0 -230
- octavia/controller/worker/v1/flows/pool_flows.py +0 -127
- octavia/controller/worker/v1/tasks/__init__.py +0 -11
- octavia/controller/worker/v1/tasks/amphora_driver_tasks.py +0 -453
- octavia/controller/worker/v1/tasks/cert_task.py +0 -51
- octavia/controller/worker/v1/tasks/compute_tasks.py +0 -335
- octavia/controller/worker/v1/tasks/database_tasks.py +0 -2756
- octavia/controller/worker/v1/tasks/lifecycle_tasks.py +0 -173
- octavia/controller/worker/v1/tasks/model_tasks.py +0 -41
- octavia/controller/worker/v1/tasks/network_tasks.py +0 -970
- octavia/controller/worker/v1/tasks/retry_tasks.py +0 -74
- octavia/tests/unit/api/drivers/amphora_driver/v1/__init__.py +0 -11
- octavia/tests/unit/api/drivers/amphora_driver/v1/test_driver.py +0 -824
- octavia/tests/unit/controller/queue/v1/__init__.py +0 -11
- octavia/tests/unit/controller/queue/v1/test_consumer.py +0 -61
- octavia/tests/unit/controller/queue/v1/test_endpoints.py +0 -189
- octavia/tests/unit/controller/worker/v1/__init__.py +0 -11
- octavia/tests/unit/controller/worker/v1/flows/__init__.py +0 -11
- octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py +0 -474
- octavia/tests/unit/controller/worker/v1/flows/test_health_monitor_flows.py +0 -72
- octavia/tests/unit/controller/worker/v1/flows/test_l7policy_flows.py +0 -67
- octavia/tests/unit/controller/worker/v1/flows/test_l7rule_flows.py +0 -67
- octavia/tests/unit/controller/worker/v1/flows/test_listener_flows.py +0 -91
- octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py +0 -431
- octavia/tests/unit/controller/worker/v1/flows/test_member_flows.py +0 -106
- octavia/tests/unit/controller/worker/v1/flows/test_pool_flows.py +0 -77
- octavia/tests/unit/controller/worker/v1/tasks/__init__.py +0 -11
- octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py +0 -792
- octavia/tests/unit/controller/worker/v1/tasks/test_cert_task.py +0 -46
- octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py +0 -634
- octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py +0 -2615
- octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks_quota.py +0 -415
- octavia/tests/unit/controller/worker/v1/tasks/test_lifecycle_tasks.py +0 -401
- octavia/tests/unit/controller/worker/v1/tasks/test_model_tasks.py +0 -44
- octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py +0 -1788
- octavia/tests/unit/controller/worker/v1/tasks/test_retry_tasks.py +0 -47
- octavia/tests/unit/controller/worker/v1/test_controller_worker.py +0 -2096
- octavia-12.0.0.0rc2.dist-info/pbr.json +0 -1
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/LICENSE +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/README.rst +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/diskimage-create.sh +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/image-tests.sh +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/requirements.txt +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/test-requirements.txt +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/tox.ini +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/version.txt +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/scripts/octavia-wsgi +0 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/LICENSE +0 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/WHEEL +0 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/top_level.txt +0 -0
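A pattern that recurs throughout the repositories.py hunks below is replacing plain queries with SQLAlchemy's pessimistic row-lock idiom, `populate_existing()` chained with `with_for_update()`, before mutating the row. The snippet that follows is a minimal, self-contained sketch of that idiom only; the `Quota` model, its columns, and the SQLite engine are hypothetical stand-ins rather than Octavia code, and SQLite simply ignores FOR UPDATE, so the sketch stays runnable without a real database.

# Minimal sketch of the populate_existing() + with_for_update() row-lock
# idiom adopted by the repositories.py hunks below. The Quota model and
# its columns are hypothetical; SQLite ignores FOR UPDATE and is used here
# only so the sketch runs standalone.
import sqlalchemy as sa
from sqlalchemy import orm

Base = orm.declarative_base()


class Quota(Base):
    __tablename__ = 'quotas'
    project_id = sa.Column(sa.String(36), primary_key=True)
    in_use_load_balancer = sa.Column(sa.Integer, default=0)


engine = sa.create_engine('sqlite://')
Base.metadata.create_all(engine)

with orm.Session(engine) as session, session.begin():
    session.add(Quota(project_id='demo-project', in_use_load_balancer=0))

with orm.Session(engine) as session, session.begin():
    # populate_existing() forces any already-loaded copy of the row to be
    # refreshed from this SELECT, and with_for_update() emits
    # SELECT ... FOR UPDATE so concurrent writers block until this
    # transaction commits or rolls back.
    quota = (session.query(Quota)
             .filter_by(project_id='demo-project')
             .populate_existing()
             .with_for_update()
             .first())
    if quota is not None:
        quota.in_use_load_balancer += 1

On engines such as PostgreSQL or MySQL the second block serializes concurrent updaters of the same quota row, which is the behavior the locking changes in the diff below rely on.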
octavia/db/repositories.py
CHANGED
@@ -33,6 +33,7 @@ from sqlalchemy.orm import subqueryload
 from sqlalchemy import select
 from sqlalchemy.sql.expression import false
 from sqlalchemy.sql import func
+from sqlalchemy import text
 from sqlalchemy import update

 from octavia.common import constants as consts
@@ -76,9 +77,8 @@ class BaseRepository(object):
         :param model_kwargs: Attributes of the model to insert.
         :returns: octavia.common.data_model
         """
-
-
-        session.add(model)
+        model = self.model_class(**model_kwargs)
+        session.add(model)
         return model.to_data_model()

     def delete(self, session, **filters):
@@ -90,9 +90,8 @@ class BaseRepository(object):
         :raises: sqlalchemy.orm.exc.NoResultFound
         """
         model = session.query(self.model_class).filter_by(**filters).one()
-
-
-        session.flush()
+        session.delete(model)
+        session.flush()

     def delete_batch(self, session, ids=None):
         """Batch deletes by entity ids."""
@@ -107,13 +106,12 @@ class BaseRepository(object):
         :param model_kwargs: Entity attributes that should be updates.
         :returns: octavia.common.data_model
         """
-
-
-
-
-
-
-            id=id).update(model_kwargs)
+        tags = model_kwargs.pop('tags', None)
+        if tags is not None:
+            resource = session.get(self.model_class, id)
+            resource.tags = tags
+        session.query(self.model_class).filter_by(
+            id=id).update(model_kwargs)

     def get(self, session, **filters):
         """Retrieves an entity from the database.
@@ -249,20 +247,19 @@ class Repositories(object):
         :returns: octavia.common.data_models.LoadBalancer
         """
         additional_vip_dicts = additional_vip_dicts or []
-
-
-
-
-
-
-
-
-
-
-
-
-
-            session.add(add_vip)
+        if not lb_dict.get('id'):
+            lb_dict['id'] = uuidutils.generate_uuid()
+        lb = models.LoadBalancer(**lb_dict)
+        session.add(lb)
+        vip_dict['load_balancer_id'] = lb_dict['id']
+        vip = models.Vip(**vip_dict)
+        session.add(vip)
+        for add_vip_dict in additional_vip_dicts:
+            add_vip_dict['load_balancer_id'] = lb_dict['id']
+            add_vip_dict['network_id'] = vip_dict.get('network_id')
+            add_vip_dict['port_id'] = vip_dict.get('port_id')
+            add_vip = models.AdditionalVip(**add_vip_dict)
+            session.add(add_vip)

         return self.load_balancer.get(session, id=lb.id)

@@ -276,17 +273,16 @@ class Repositories(object):
             reference this pool as its default_pool_id
         :returns: octavia.common.data_models.Pool
         """
-
-
-
-
-
-
-
-
-
-
-            default_pool_id=pool_dict['id'])
+        if not pool_dict.get('id'):
+            pool_dict['id'] = uuidutils.generate_uuid()
+        sp_dict = pool_dict.pop('session_persistence', None)
+        db_pool = self.pool.create(session, **pool_dict)
+        if sp_dict is not None and sp_dict != {}:
+            sp_dict['pool_id'] = pool_dict['id']
+            self.session_persistence.create(session, **sp_dict)
+        if listener_id:
+            self.listener.update(session, listener_id,
+                                 default_pool_id=pool_dict['id'])

         # Immediate refresh, as we have found that sqlalchemy will sometimes
         # cache the above query and the pool object may miss the listener_id
@@ -303,23 +299,23 @@ class Repositories(object):
         :param pool_dict: Dictionary representation of a pool
         :returns: octavia.common.data_models.Pool
         """
-
-
-
-        if
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if 'session_persistence' in pool_dict.keys():
+            sp_dict = pool_dict.pop('session_persistence')
+            if sp_dict is None or sp_dict == {}:
+                if self.session_persistence.exists(session, pool_id):
+                    self.session_persistence.delete(session,
+                                                    pool_id=pool_id)
+            elif self.session_persistence.exists(session, pool_id):
+                self.session_persistence.update(session, pool_id,
+                                                **sp_dict)
+            else:
+                sp_dict['pool_id'] = pool_id
+                self.session_persistence.create(session, **sp_dict)
+        # If only the session_persistence is being updated, this will be
+        # empty
+        if pool_dict:
+            self.pool.update(session, pool_id, **pool_dict)
+        session.flush()
         return self.pool.get(session, id=pool_id)

     def test_and_set_lb_and_listeners_prov_status(self, session, lb_id,
@@ -406,8 +402,11 @@ class Repositories(object):
         # Note: You cannot just use the current count as the in-use
         # value as we don't want to lock the whole resource table
         try:
-            quotas = lock_session.query(models.Quotas)
-
+            quotas = (lock_session.query(models.Quotas)
+                      .filter_by(project_id=project_id)
+                      .populate_existing()
+                      .with_for_update()
+                      .first())
             if _class == data_models.LoadBalancer:
                 # Decide which quota to use
                 if quotas.load_balancer is None:
@@ -575,14 +574,17 @@ class Repositories(object):

         # Lock the project record in the database to block other quota checks
         try:
-            quotas = lock_session.query(models.Quotas)
-
+            quotas = (lock_session.query(models.Quotas)
+                      .filter_by(project_id=project_id)
+                      .populate_existing()
+                      .with_for_update()
+                      .first())
             if not quotas:
                 if not CONF.api_settings.auth_strategy == consts.NOAUTH:
                     LOG.error('Quota decrement on %(clss)s called on '
                               'project: %(proj)s with no quota record in '
                               'the database.',
-                              {'clss':
+                              {'clss': _class, 'proj': project_id})
                 return
             if _class == data_models.LoadBalancer:
                 if (quotas.in_use_load_balancer is not None and
@@ -594,7 +596,7 @@ class Repositories(object):
                     LOG.warning('Quota decrement on %(clss)s called on '
                                 'project: %(proj)s that would cause a '
                                 'negative quota.',
-                                {'clss':
+                                {'clss': _class, 'proj': project_id})
             if _class == data_models.Listener:
                 if (quotas.in_use_listener is not None and
                         quotas.in_use_listener > 0):
@@ -605,7 +607,7 @@ class Repositories(object):
                     LOG.warning('Quota decrement on %(clss)s called on '
                                 'project: %(proj)s that would cause a '
                                 'negative quota.',
-                                {'clss':
+                                {'clss': _class, 'proj': project_id})
             if _class == data_models.Pool:
                 if (quotas.in_use_pool is not None and
                         quotas.in_use_pool > 0):
@@ -616,7 +618,7 @@ class Repositories(object):
                     LOG.warning('Quota decrement on %(clss)s called on '
                                 'project: %(proj)s that would cause a '
                                 'negative quota.',
-                                {'clss':
+                                {'clss': _class, 'proj': project_id})
             if _class == data_models.HealthMonitor:
                 if (quotas.in_use_health_monitor is not None and
                         quotas.in_use_health_monitor > 0):
@@ -627,7 +629,7 @@ class Repositories(object):
                     LOG.warning('Quota decrement on %(clss)s called on '
                                 'project: %(proj)s that would cause a '
                                 'negative quota.',
-                                {'clss':
+                                {'clss': _class, 'proj': project_id})
             if _class == data_models.Member:
                 if (quotas.in_use_member is not None and
                         quotas.in_use_member > 0):
@@ -638,7 +640,7 @@ class Repositories(object):
                     LOG.warning('Quota decrement on %(clss)s called on '
                                 'project: %(proj)s that would cause a '
                                 'negative quota.',
-                                {'clss':
+                                {'clss': _class, 'proj': project_id})
             if _class == data_models.L7Policy:
                 if (quotas.in_use_l7policy is not None and
                         quotas.in_use_l7policy > 0):
@@ -649,7 +651,7 @@ class Repositories(object):
                     LOG.warning('Quota decrement on %(clss)s called on '
                                 'project: %(proj)s that would cause a '
                                 'negative quota.',
-                                {'clss':
+                                {'clss': _class, 'proj': project_id})
             if _class == data_models.L7Rule:
                 if (quotas.in_use_l7rule is not None and
                         quotas.in_use_l7rule > 0):
@@ -660,7 +662,7 @@ class Repositories(object):
                     LOG.warning('Quota decrement on %(clss)s called on '
                                 'project: %(proj)s that would cause a '
                                 'negative quota.',
-                                {'clss':
+                                {'clss': _class, 'proj': project_id})
         except db_exception.DBDeadlock as e:
             LOG.warning('Quota project lock timed out for project: %(proj)s',
                         {'proj': project_id})
@@ -673,26 +675,25 @@ class Repositories(object):
         :param amp_id: The amphora ID to query.
         :returns: An amphora stats dictionary
         """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        return amp_stats
+        columns = (list(models.ListenerStatistics.__table__.columns) +
+                   [models.Amphora.load_balancer_id])
+        amp_records = (
+            session.query(*columns)
+            .filter(models.ListenerStatistics.amphora_id == amp_id)
+            .filter(models.ListenerStatistics.amphora_id ==
+                    models.Amphora.id).all())
+        amp_stats = []
+        for amp in amp_records:
+            amp_stat = {consts.LOADBALANCER_ID: amp.load_balancer_id,
+                        consts.LISTENER_ID: amp.listener_id,
+                        'id': amp.amphora_id,
+                        consts.ACTIVE_CONNECTIONS: amp.active_connections,
+                        consts.BYTES_IN: amp.bytes_in,
+                        consts.BYTES_OUT: amp.bytes_out,
+                        consts.REQUEST_ERRORS: amp.request_errors,
+                        consts.TOTAL_CONNECTIONS: amp.total_connections}
+            amp_stats.append(amp_stat)
+        return amp_stats


 class LoadBalancerRepository(BaseRepository):
@@ -741,22 +742,23 @@ class LoadBalancerRepository(BaseRepository):
         :param raise_exception: If True, raise ImmutableObject on failure
         :returns: bool
         """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        lb = (session.query(self.model_class)
+              .populate_existing()
+              .with_for_update()
+              .filter_by(id=id).one())
+        is_delete = status == consts.PENDING_DELETE
+        acceptable_statuses = (
+            consts.DELETABLE_STATUSES
+            if is_delete else consts.MUTABLE_STATUSES
+        )
+        if lb.provisioning_status not in acceptable_statuses:
+            if raise_exception:
+                raise exceptions.ImmutableObject(
+                    resource='Load Balancer', id=id)
+            return False
+        lb.provisioning_status = status
+        session.add(lb)
+        return True

     def set_status_for_failover(self, session, id, status,
                                 raise_exception=False):
@@ -773,17 +775,18 @@ class LoadBalancerRepository(BaseRepository):
         :param raise_exception: If True, raise ImmutableObject on failure
         :returns: bool
         """
-
-
-
-
-
-
-
-
-
-
-
+        lb = (session.query(self.model_class)
+              .populate_existing()
+              .with_for_update()
+              .filter_by(id=id).one())
+        if lb.provisioning_status not in consts.FAILOVERABLE_STATUSES:
+            if raise_exception:
+                raise exceptions.ImmutableObject(
+                    resource='Load Balancer', id=id)
+            return False
+        lb.provisioning_status = status
+        session.add(lb)
+        return True


 class VipRepository(BaseRepository):
@@ -791,9 +794,8 @@ class VipRepository(BaseRepository):

     def update(self, session, load_balancer_id, **model_kwargs):
         """Updates a vip entity in the database by load_balancer_id."""
-
-
-            load_balancer_id=load_balancer_id).update(model_kwargs)
+        session.query(self.model_class).filter_by(
+            load_balancer_id=load_balancer_id).update(model_kwargs)


 class AdditionalVipRepository(BaseRepository):
@@ -805,10 +807,9 @@ class AdditionalVipRepository(BaseRepository):

         Uses load_balancer_id + subnet_id.
         """
-
-
-
-            subnet_id=subnet_id).update(model_kwargs)
+        session.query(self.model_class).filter_by(
+            load_balancer_id=load_balancer_id,
+            subnet_id=subnet_id).update(model_kwargs)


 class HealthMonitorRepository(BaseRepository):
@@ -844,9 +845,8 @@ class SessionPersistenceRepository(BaseRepository):

     def update(self, session, pool_id, **model_kwargs):
         """Updates a session persistence entity in the database by pool_id."""
-
-
-            pool_id=pool_id).update(model_kwargs)
+        session.query(self.model_class).filter_by(
+            pool_id=pool_id).update(model_kwargs)

     def exists(self, session, pool_id):
         """Checks if session persistence exists on a pool."""
@@ -859,18 +859,16 @@ class ListenerCidrRepository(BaseRepository):

     def create(self, session, listener_id, allowed_cidrs):
         if allowed_cidrs:
-
-
-
-
-            session.add(model)
+            for cidr in set(allowed_cidrs):
+                cidr_dict = {'listener_id': listener_id, 'cidr': cidr}
+                model = self.model_class(**cidr_dict)
+                session.add(model)

     def update(self, session, listener_id, allowed_cidrs):
         """Updates allowed CIDRs in the database by listener_id."""
-
-
-
-        self.create(session, listener_id, allowed_cidrs)
+        session.query(self.model_class).filter_by(
+            listener_id=listener_id).delete()
+        self.create(session, listener_id, allowed_cidrs)


 class PoolRepository(BaseRepository):
@@ -959,9 +957,8 @@ class MemberRepository(BaseRepository):
         :param model_kwargs: Entity attributes that should be updates.
         :returns: octavia.common.data_model
         """
-
-
-            pool_id=pool_id).update(model_kwargs)
+        session.query(self.model_class).filter_by(
+            pool_id=pool_id).update(model_kwargs)


 class ListenerRepository(BaseRepository):
@@ -1033,69 +1030,66 @@ class ListenerRepository(BaseRepository):
         return bool(listener.default_pool)

     def update(self, session, id, **model_kwargs):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        listener_db.update(model_kwargs)
+        listener_db = session.query(self.model_class).filter_by(
+            id=id).first()
+        if not listener_db:
+            raise exceptions.NotFound(
+                resource=data_models.Listener._name(), id=id)
+        tags = model_kwargs.pop('tags', None)
+        if tags is not None:
+            resource = session.get(self.model_class, id)
+            resource.tags = tags
+        # Verify any newly specified default_pool_id exists
+        default_pool_id = model_kwargs.get('default_pool_id')
+        if default_pool_id:
+            self._pool_check(session, default_pool_id, listener_id=id)
+        if 'sni_containers' in model_kwargs:
+            # sni_container_refs is being updated. It is either being set
+            # or unset/cleared. We need to update in DB side.
+            containers = model_kwargs.pop('sni_containers', []) or []
+            listener_db.sni_containers = []
+            if containers:
+                listener_db.sni_containers = [
+                    models.SNI(listener_id=id,
+                               tls_container_id=container_ref)
+                    for container_ref in containers]
+        if 'allowed_cidrs' in model_kwargs:
+            # allowed_cidrs is being updated. It is either being set or
+            # unset/cleared. We need to update in DB side.
+            allowed_cidrs = model_kwargs.pop('allowed_cidrs', []) or []
+            listener_db.allowed_cidrs = []
+            if allowed_cidrs:
+                listener_db.allowed_cidrs = [
+                    models.ListenerCidr(listener_id=id, cidr=cidr)
+                    for cidr in allowed_cidrs]
+        listener_db.update(model_kwargs)

     def create(self, session, **model_kwargs):
         """Creates a new Listener with some validation."""
-
-
-
-
-
-
-
-
-        model.
-
-
-
-
-
-        session.add(model)
+        listener_id = model_kwargs.get('id')
+        allowed_cidrs = set(model_kwargs.pop('allowed_cidrs', []) or [])
+        model_kwargs['allowed_cidrs'] = [
+            models.ListenerCidr(listener_id=listener_id, cidr=cidr)
+            for cidr in allowed_cidrs]
+        model = self.model_class(**model_kwargs)
+        if model.default_pool_id:
+            model.default_pool = self._pool_check(
+                session, model.default_pool_id,
+                lb_id=model.load_balancer_id)
+        if model.peer_port is None:
+            model.peer_port = self._find_next_peer_port(
+                session, lb_id=model.load_balancer_id)
+        session.add(model)
         return model.to_data_model()

     def prov_status_active_if_not_error(self, session, listener_id):
         """Update provisioning_status to ACTIVE if not already in ERROR."""
-
-
-
-
-
-
-            synchronize_session='fetch'))
+        (session.query(self.model_class).filter_by(id=listener_id).
+         # Don't mark ERROR or already ACTIVE as ACTIVE
+         filter(~self.model_class.provisioning_status.in_(
+             [consts.ERROR, consts.ACTIVE])).
+         update({self.model_class.provisioning_status: consts.ACTIVE},
+                synchronize_session='fetch'))


 class ListenerStatisticsRepository(BaseRepository):
@@ -1112,19 +1106,18 @@ class ListenerStatisticsRepository(BaseRepository):
         # amphora_id can't be null, so clone the listener_id
         stats_obj.amphora_id = stats_obj.listener_id

-
-
-
+        # TODO(johnsom): This can be simplified/optimized using an "upsert"
+        count = session.query(self.model_class).filter_by(
+            listener_id=stats_obj.listener_id,
+            amphora_id=stats_obj.amphora_id).count()
+        if count:
+            session.query(self.model_class).filter_by(
                 listener_id=stats_obj.listener_id,
-                amphora_id=stats_obj.amphora_id).
-
-
-
-
-                stats_obj.get_stats(),
-                synchronize_session=False)
-        else:
-            self.create(session, **stats_obj.db_fields())
+                amphora_id=stats_obj.amphora_id).update(
+                stats_obj.get_stats(),
+                synchronize_session=False)
+        else:
+            self.create(session, **stats_obj.db_fields())

     def increment(self, session, delta_stats):
         """Updates a listener's statistics, incrementing by the passed deltas.
@@ -1137,21 +1130,23 @@ class ListenerStatisticsRepository(BaseRepository):
         # amphora_id can't be null, so clone the listener_id
         delta_stats.amphora_id = delta_stats.listener_id

-
-
-
-
-
-
-
-
+        # TODO(johnsom): This can be simplified/optimized using an "upsert"
+        count = session.query(self.model_class).filter_by(
+            listener_id=delta_stats.listener_id,
+            amphora_id=delta_stats.amphora_id).count()
+        if count:
+            existing_stats = (
+                session.query(self.model_class)
+                .populate_existing()
+                .with_for_update()
+                .filter_by(
                     listener_id=delta_stats.listener_id,
-                    amphora_id=delta_stats.amphora_id).one()
-
-
-
-
-
+                    amphora_id=delta_stats.amphora_id).one())
+            existing_stats += delta_stats
+            existing_stats.active_connections = (
+                delta_stats.active_connections)
+        else:
+            self.create(session, **delta_stats.db_fields())

     def update(self, session, listener_id, **model_kwargs):
         """Updates a listener's statistics, overriding with the passed values.
@@ -1162,9 +1157,8 @@ class ListenerStatisticsRepository(BaseRepository):
         :param model_kwargs: Entity attributes that should be updated

         """
-
-
-            listener_id=listener_id).update(model_kwargs)
+        session.query(self.model_class).filter_by(
+            listener_id=listener_id).update(model_kwargs)


 class AmphoraRepository(BaseRepository):
@@ -1200,12 +1194,11 @@ class AmphoraRepository(BaseRepository):
         :param load_balancer_id: The load balancer id to associate
         :param amphora_id: The amphora id to associate
         """
-
-
-
-
-
-        load_balancer.amphorae.append(amphora)
+        load_balancer = session.query(models.LoadBalancer).filter_by(
+            id=load_balancer_id).first()
+        amphora = session.query(self.model_class).filter_by(
+            id=amphora_id).first()
+        load_balancer.amphorae.append(amphora)

     @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
     def allocate_and_associate(self, session, load_balancer_id,
@@ -1228,17 +1221,18 @@ class AmphoraRepository(BaseRepository):
             LOG.debug("Filtering amps by zone: %s", availability_zone)
             filters['cached_zone'] = availability_zone

-
-
-
+        amp = (session.query(self.model_class)
+               .populate_existing()
+               .with_for_update()
+               .filter_by(**filters).first())

-
-
+        if amp is None:
+            return None

-
-
-
-
+        if availability_zone:
+            LOG.debug("Found amp: %s in %s", amp.id, amp.cached_zone)
+        amp.status = 'ALLOCATED'
+        amp.load_balancer_id = load_balancer_id

         return amp.to_data_model()

@@ -1280,18 +1274,20 @@ class AmphoraRepository(BaseRepository):
         expired_date = datetime.datetime.utcnow() + datetime.timedelta(
             seconds=expired_seconds)

-
-
-
-
-
-
-
-
-
-
+        amp = (session.query(self.model_class)
+               .populate_existing()
+               .with_for_update()
+               .filter(
+                   self.model_class.status.notin_(
+                       [consts.DELETED, consts.PENDING_DELETE]),
+                   self.model_class.cert_busy == false(),
+                   self.model_class.cert_expiration < expired_date)
+               .first())
+
+        if amp is None:
+            return None

-
+        amp.cert_busy = True

         return amp.to_data_model()

@@ -1317,7 +1313,7 @@ class AmphoraRepository(BaseRepository):
         :param amphora_id: The amphora ID to lookup the load balancer for.
         :returns: A dictionary containing the required load balancer details.
         """
-        rows = session.execute(
+        rows = session.execute(text(
             "SELECT load_balancer.id, load_balancer.enabled, "
             "load_balancer.provisioning_status AS lb_prov_status, "
             "load_balancer.operating_status AS lb_op_status, "
@@ -1335,13 +1331,13 @@ class AmphoraRepository(BaseRepository):
             "LEFT JOIN pool ON load_balancer.id = pool.load_balancer_id "
             "LEFT JOIN member ON pool.id = member.pool_id WHERE "
             "amphora.id = :amp_id AND amphora.status != :deleted AND "
-            "load_balancer.provisioning_status != :deleted;"
-
+            "load_balancer.provisioning_status != :deleted;").bindparams(
+                amp_id=amphora_id, deleted=consts.DELETED))

         lb = {}
         listeners = {}
         pools = {}
-        for row in rows:
+        for row in rows.mappings():
             if not lb:
                 lb['id'] = row['id']
                 lb['enabled'] = row['enabled'] == 1
@@ -1385,8 +1381,11 @@ class AmphoraRepository(BaseRepository):
         :raises NoResultFound: The amphora was not found or already deleted.
         :returns: None
         """
-        amp = lock_session.query(self.model_class)
-
+        amp = (lock_session.query(self.model_class)
+               .populate_existing()
+               .with_for_update()
+               .filter_by(id=id)
+               .filter(self.model_class.status != consts.DELETED).one())
         if amp.status not in [consts.AMPHORA_READY, consts.ERROR]:
             raise exceptions.ImmutableObject(resource=consts.AMPHORA, id=id)
         amp.status = consts.PENDING_DELETE
@@ -1398,16 +1397,14 @@ class AmphoraBuildReqRepository(BaseRepository):

     def add_to_build_queue(self, session, amphora_id=None, priority=None):
         """Adds the build request to the table."""
-
-
-        session.add(model)
+        model = self.model_class(amphora_id=amphora_id, priority=priority)
+        session.add(model)

     def update_req_status(self, session, amphora_id=None):
         """Updates the request status."""
-
-
-
-         .update({self.model_class.status: 'BUILDING'}))
+        (session.query(self.model_class)
+         .filter_by(amphora_id=amphora_id)
+         .update({self.model_class.status: 'BUILDING'}))

     def get_highest_priority_build_req(self, session):
         """Fetches build request with highest priority and least created_time.
@@ -1418,17 +1415,15 @@ class AmphoraBuildReqRepository(BaseRepository):
         :returns amphora_id corresponding to highest priority and least created
         time in 'WAITING' status.
         """
-
-
-
-
-
-                .first())[0]
+        return (session.query(self.model_class.amphora_id)
+                .order_by(self.model_class.status.desc())
+                .order_by(self.model_class.priority.asc())
+                .order_by(self.model_class.created_time.asc())
+                .first())[0]

     def delete_all(self, session):
         "Deletes all the build requests."
-
-        session.query(self.model_class).delete()
+        session.query(self.model_class).delete()


 class AmphoraBuildSlotsRepository(BaseRepository):
@@ -1439,24 +1434,22 @@ class AmphoraBuildSlotsRepository(BaseRepository):

         :returns: Number of current build slots.
         """
-
-        count = session.query(self.model_class.slots_used).one()
+        count = session.query(self.model_class.slots_used).one()
         return count[0]

     def update_count(self, session, action='increment'):
         """Increments/Decrements/Resets the number of build_slots used."""
-
-
-
-
-
-
-
-
-
-
-
-                {self.model_class.slots_used: 0})
+        if action == 'increment':
+            session.query(self.model_class).filter_by(id=1).update(
+                {self.model_class.slots_used:
+                 self.get_used_build_slots_count(session) + 1})
+        elif action == 'decrement':
+            session.query(self.model_class).filter_by(id=1).update(
+                {self.model_class.slots_used:
+                 self.get_used_build_slots_count(session) - 1})
+        elif action == 'reset':
+            session.query(self.model_class).filter_by(id=1).update(
+                {self.model_class.slots_used: 0})


 class SNIRepository(BaseRepository):
@@ -1467,13 +1460,12 @@ class SNIRepository(BaseRepository):
         """Updates an SNI entity in the database."""
         if not listener_id and tls_container_id:
             raise exceptions.MissingArguments
-
-
-
-
-
-
-                tls_container_id=tls_container_id).update(model_kwargs)
+        if listener_id:
+            session.query(self.model_class).filter_by(
+                listener_id=listener_id).update(model_kwargs)
+        elif tls_container_id:
+            session.query(self.model_class).filter_by(
+                tls_container_id=tls_container_id).update(model_kwargs)


 class AmphoraHealthRepository(BaseRepository):
@@ -1481,22 +1473,20 @@ class AmphoraHealthRepository(BaseRepository):

     def update(self, session, amphora_id, **model_kwargs):
         """Updates a healthmanager entity in the database by amphora_id."""
-
-
-            amphora_id=amphora_id).update(model_kwargs)
+        session.query(self.model_class).filter_by(
+            amphora_id=amphora_id).update(model_kwargs)

     def replace(self, session, amphora_id, **model_kwargs):
         """replace or insert amphora into database."""
-
-
-
-
-
-
-
-
-
-            self.create(session, **model_kwargs)
+        count = session.query(self.model_class).filter_by(
+            amphora_id=amphora_id).count()
+        if count:
+            session.query(self.model_class).filter_by(
+                amphora_id=amphora_id).update(model_kwargs,
+                                              synchronize_session=False)
+        else:
+            model_kwargs['amphora_id'] = amphora_id
+            self.create(session, **model_kwargs)

     def check_amphora_health_expired(self, session, amphora_id, exp_age=None):
         """check if a specific amphora is expired in the amphora_health table
@@ -1551,7 +1541,7 @@ class AmphoraHealthRepository(BaseRepository):
         # Handle expired amphora
         expired_ids_query = select(self.model_class.amphora_id).where(
             self.model_class.busy == false()).where(
-            self.model_class.last_update < expired_time)
+            self.model_class.last_update < expired_time).subquery()

         expired_count = lock_session.scalar(
             select(func.count()).select_from(expired_ids_query))
@@ -1587,6 +1577,7 @@ class AmphoraHealthRepository(BaseRepository):
         # Pick one expired amphora for automatic failover
         amp_health = lock_session.query(
             self.model_class
+        ).populate_existing(
         ).with_for_update(
         ).filter(
             self.model_class.amphora_id.in_(expired_ids_query)
@@ -1633,9 +1624,8 @@ class VRRPGroupRepository(BaseRepository):

     def update(self, session, load_balancer_id, **model_kwargs):
         """Updates a VRRPGroup entry for by load_balancer_id."""
-
-
-            load_balancer_id=load_balancer_id).update(model_kwargs)
+        session.query(self.model_class).filter_by(
+            load_balancer_id=load_balancer_id).update(model_kwargs)


 class L7RuleRepository(BaseRepository):
@@ -1666,42 +1656,40 @@ class L7RuleRepository(BaseRepository):
             query_options=query_options, **filters)

     def update(self, session, id, **model_kwargs):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        l7rule_db.update(model_kwargs)
+        l7rule_db = session.query(self.model_class).filter_by(
+            id=id).first()
+        if not l7rule_db:
+            raise exceptions.NotFound(
+                resource=data_models.L7Rule._name(), id=id)
+
+        l7rule_dict = l7rule_db.to_data_model().to_dict()
+        # Ignore values that are None
+        for k, v in model_kwargs.items():
+            if v is not None:
+                l7rule_dict.update({k: v})
+        # Clear out the 'key' attribute for rule types that don't use it.
+        if ('type' in l7rule_dict.keys() and
+                l7rule_dict['type'] in (consts.L7RULE_TYPE_HOST_NAME,
+                                        consts.L7RULE_TYPE_PATH,
+                                        consts.L7RULE_TYPE_FILE_TYPE)):
+            l7rule_dict['key'] = None
+            model_kwargs.update({'key': None})
+        validate.l7rule_data(self.model_class(**l7rule_dict))
+        l7rule_db.update(model_kwargs)

         l7rule_db = self.get(session, id=id)
         return l7rule_db

     def create(self, session, **model_kwargs):
-
-
-
-
-
-
-
-
-
-        session.add(l7rule)
+        if not model_kwargs.get('id'):
+            model_kwargs.update(id=uuidutils.generate_uuid())
+        if model_kwargs.get('l7policy_id'):
+            l7policy_db = session.query(models.L7Policy).filter_by(
+                id=model_kwargs.get('l7policy_id')).first()
+            model_kwargs.update(l7policy=l7policy_db)
+        l7rule = self.model_class(**model_kwargs)
+        validate.l7rule_data(l7rule)
+        session.add(l7rule)

         l7rule_db = self.get(session, id=l7rule.id)
         return l7rule_db
@@ -1776,46 +1764,45 @@ class L7PolicyRepository(BaseRepository):
         return data_model_list, links

     def update(self, session, id, **model_kwargs):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        l7policy_db.update(model_kwargs)
+        l7policy_db = session.query(self.model_class).filter_by(
+            id=id).first()
+        if not l7policy_db:
+            raise exceptions.NotFound(
+                resource=data_models.L7Policy._name(), id=id)
+
+        # Necessary to work around unexpected / idiotic behavior of
+        # the SQLAlchemy Orderinglist extension if the position changes.
+        position = model_kwargs.pop('position', None)
+        if position == l7policy_db.position:
+            position = None
+
+        model_kwargs.update(listener_id=l7policy_db.listener_id)
+        l7policy = self.model_class(
+            **validate.sanitize_l7policy_api_args(model_kwargs))
+        self._validate_l7policy_pool_data(session, l7policy)
+
+        if l7policy.action:
+            model_kwargs.update(action=l7policy.action)
+            if l7policy.action == consts.L7POLICY_ACTION_REJECT:
+                model_kwargs.update(redirect_url=None)
+                model_kwargs.update(redirect_pool_id=None)
+                model_kwargs.update(redirect_prefix=None)
+                model_kwargs.update(redirect_http_code=None)
+            elif (l7policy.action ==
+                    consts.L7POLICY_ACTION_REDIRECT_TO_URL):
+                model_kwargs.update(redirect_pool_id=None)
+                model_kwargs.update(redirect_prefix=None)
+            elif (l7policy.action ==
+                    consts.L7POLICY_ACTION_REDIRECT_TO_POOL):
+                model_kwargs.update(redirect_url=None)
+                model_kwargs.update(redirect_prefix=None)
+                model_kwargs.update(redirect_http_code=None)
+            elif (l7policy.action ==
+                    consts.L7POLICY_ACTION_REDIRECT_PREFIX):
+                model_kwargs.update(redirect_url=None)
+                model_kwargs.update(redirect_pool_id=None)
+
+        l7policy_db.update(model_kwargs)

         # Position manipulation must happen outside the other alterations
         # in the previous transaction
@@ -1825,38 +1812,36 @@ class L7PolicyRepository(BaseRepository):
             # Immediate refresh, as we have found that sqlalchemy will
             # sometimes cache the above query
             session.refresh(listener)
-
-
-            listener.l7policies.insert(position - 1, l7policy_db)
+            l7policy_db = listener.l7policies.pop(l7policy_db.position - 1)
+            listener.l7policies.insert(position - 1, l7policy_db)
             listener.l7policies.reorder()
             session.flush()

         return self.get(session, id=id)

     def create(self, session, **model_kwargs):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        session.flush()
+        # We must append the new policy to the end of the collection. We
+        # later re-insert it wherever it was requested to appear in order.
+        # This is to work around unexpected / idiotic behavior of the
+        # SQLAlchemy orderinglist extension.
+        position = model_kwargs.pop('position', None)
+        model_kwargs.update(position=consts.MAX_POLICY_POSITION)
+        if not model_kwargs.get('id'):
+            model_kwargs.update(id=uuidutils.generate_uuid())
+        if model_kwargs.get('redirect_pool_id'):
+            pool_db = session.query(models.Pool).filter_by(
+                id=model_kwargs.get('redirect_pool_id')).first()
+            model_kwargs.update(redirect_pool=pool_db)
+        if model_kwargs.get('listener_id'):
+            listener_db = session.query(models.Listener).filter_by(
+                id=model_kwargs.get('listener_id')).first()
+            model_kwargs.update(listener=listener_db)
+        l7policy = self.model_class(
+            **validate.sanitize_l7policy_api_args(model_kwargs,
+                                                  create=True))
+        self._validate_l7policy_pool_data(session, l7policy)
+        session.add(l7policy)
+        session.flush()

         # Must be done outside the transaction which creates the L7Policy
         listener = (session.query(models.Listener).
@@ -1867,33 +1852,30 @@ class L7PolicyRepository(BaseRepository):
         session.refresh(l7policy)

         if position is not None and position < len(listener.l7policies) + 1:
-
-
-
-            listener.l7policies.insert(position - 1, l7policy_db)
+            # New L7Policy will always be at the end of the list
+            l7policy_db = listener.l7policies.pop()
+            listener.l7policies.insert(position - 1, l7policy_db)

         listener.l7policies.reorder()
         session.flush()
-
-        l7policy.updated_at = None
+        l7policy.updated_at = None
         return self.get(session, id=l7policy.id)

     def delete(self, session, id, **filters):
-
-
-
-
-
-
-
-
-        session.flush()
+        l7policy_db = session.query(self.model_class).filter_by(
+            id=id).first()
+        if not l7policy_db:
+            raise exceptions.NotFound(
+                resource=data_models.L7Policy._name(), id=id)
+        listener_id = l7policy_db.listener_id
+        session.delete(l7policy_db)
+        session.flush()

         # Must do reorder outside of the delete transaction.
         listener = (session.query(models.Listener).
                     filter_by(id=listener_id).first())
-        # Immediate refresh, as we have found that sqlalchemy will
-        # cache the above query
+        # Immediate refresh, as we have found that sqlalchemy will
+        # sometimes cache the above query
         session.refresh(listener)
         listener.l7policies.reorder()
         session.flush()
@@ -1902,41 +1884,44 @@ class L7PolicyRepository(BaseRepository):
 class QuotasRepository(BaseRepository):
     model_class = models.Quotas

-    # This is used with an autocommit session (non-lock_session)
     # Since this is for the initial quota record creation it locks the table
     # which can lead to recoverable deadlocks. Thus we use the deadlock
     # retry wrapper here. This may not be appropriate for other sessions
     # and or queries. Use with caution.
     @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
     def update(self, session, project_id, **model_kwargs):
-
-
-
-
-
-
+        kwargs_quota = model_kwargs['quota']
+        quotas = (
+            session.query(self.model_class)
+            .filter_by(project_id=project_id)
+            .populate_existing()
+            .with_for_update().first())
+        if not quotas:
+            quotas = models.Quotas(project_id=project_id)

-
-
-
-
+        for key, val in kwargs_quota.items():
+            setattr(quotas, key, val)
+        session.add(quotas)
+        session.flush()
         return self.get(session, project_id=project_id)

     def delete(self, session, project_id):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        quotas = (
+            session.query(self.model_class)
+            .filter_by(project_id=project_id)
+            .populate_existing()
+            .with_for_update().first())
+        if not quotas:
+            raise exceptions.NotFound(
+                resource=data_models.Quotas._name(), id=project_id)
+        quotas.health_monitor = None
+        quotas.load_balancer = None
+        quotas.listener = None
+        quotas.member = None
+        quotas.pool = None
+        quotas.l7policy = None
+        quotas.l7rule = None
+        session.flush()


 class _GetALLExceptDELETEDIdMixin(object):
@@ -1978,23 +1963,21 @@ class FlavorRepository(_GetALLExceptDELETEDIdMixin, BaseRepository):
     model_class = models.Flavor

     def get_flavor_metadata_dict(self, session, flavor_id):
-
-
-
-
-            .
-
-
-
-
-        return result_dict
+        flavor_metadata_json = (
+            session.query(models.FlavorProfile.flavor_data)
+            .filter(models.Flavor.id == flavor_id)
+            .filter(
+                models.Flavor.flavor_profile_id == models.FlavorProfile.id)
+            .one()[0])
+        result_dict = ({} if flavor_metadata_json is None
+                       else jsonutils.loads(flavor_metadata_json))
+        return result_dict

     def get_flavor_provider(self, session, flavor_id):
-
-
-
-
-                models.FlavorProfile.id).one()[0])
+        return (session.query(models.FlavorProfile.provider_name)
+                .filter(models.Flavor.id == flavor_id)
+                .filter(models.Flavor.flavor_profile_id ==
+                        models.FlavorProfile.id).one()[0])

     def delete(self, serial_session, **filters):
         """Sets DELETED LBs flavor_id to NIL_UUID, then removes the flavor
@@ -2024,27 +2007,25 @@ class AvailabilityZoneRepository(_GetALLExceptDELETEDIdMixin, BaseRepository):

     def get_availability_zone_metadata_dict(self, session,
                                             availability_zone_name):
-
-
-
-
-
-
-
-
-
-
-
-        return result_dict
+        availability_zone_metadata_json = (
+            session.query(
+                models.AvailabilityZoneProfile.availability_zone_data)
+            .filter(models.AvailabilityZone.name == availability_zone_name)
+            .filter(models.AvailabilityZone.availability_zone_profile_id ==
+                    models.AvailabilityZoneProfile.id)
+            .one()[0])
+        result_dict = (
+            {} if availability_zone_metadata_json is None
+            else jsonutils.loads(availability_zone_metadata_json))
+        return result_dict

     def get_availability_zone_provider(self, session, availability_zone_name):
-
-
-
-
-
-
-                    models.AvailabilityZoneProfile.id).one()[0])
+        return (session.query(models.AvailabilityZoneProfile.provider_name)
+                .filter(
+                    models.AvailabilityZone.name == availability_zone_name)
+                .filter(
+                    models.AvailabilityZone.availability_zone_profile_id ==
+                    models.AvailabilityZoneProfile.id).one()[0])

     def update(self, session, name, **model_kwargs):
         """Updates an entity in the database.
@@ -2053,9 +2034,8 @@ class AvailabilityZoneRepository(_GetALLExceptDELETEDIdMixin, BaseRepository):
         :param model_kwargs: Entity attributes that should be updates.
         :returns: octavia.common.data_model
         """
-
-
-            name=name).update(model_kwargs)
+        session.query(self.model_class).filter_by(
+            name=name).update(model_kwargs)

     def delete(self, serial_session, **filters):
         """Special delete method for availability_zone.
|