octavia 12.0.0.0rc2__py3-none-any.whl → 13.0.0.0rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- octavia/amphorae/backends/agent/api_server/osutils.py +1 -0
- octavia/amphorae/backends/agent/api_server/plug.py +21 -7
- octavia/amphorae/backends/agent/api_server/templates/amphora-netns.systemd.j2 +2 -2
- octavia/amphorae/backends/agent/api_server/util.py +21 -0
- octavia/amphorae/backends/health_daemon/health_daemon.py +9 -3
- octavia/amphorae/backends/health_daemon/health_sender.py +2 -0
- octavia/amphorae/backends/utils/interface.py +14 -6
- octavia/amphorae/backends/utils/interface_file.py +6 -3
- octavia/amphorae/backends/utils/keepalivedlvs_query.py +8 -9
- octavia/amphorae/drivers/driver_base.py +1 -2
- octavia/amphorae/drivers/haproxy/rest_api_driver.py +11 -25
- octavia/amphorae/drivers/health/heartbeat_udp.py +34 -24
- octavia/amphorae/drivers/keepalived/jinja/jinja_cfg.py +3 -12
- octavia/amphorae/drivers/noop_driver/driver.py +3 -5
- octavia/api/common/pagination.py +4 -4
- octavia/api/drivers/amphora_driver/v2/driver.py +11 -5
- octavia/api/drivers/driver_agent/driver_get.py +22 -14
- octavia/api/drivers/driver_agent/driver_updater.py +8 -4
- octavia/api/drivers/utils.py +4 -2
- octavia/api/healthcheck/healthcheck_plugins.py +4 -2
- octavia/api/root_controller.py +4 -1
- octavia/api/v2/controllers/amphora.py +35 -38
- octavia/api/v2/controllers/availability_zone_profiles.py +43 -33
- octavia/api/v2/controllers/availability_zones.py +22 -18
- octavia/api/v2/controllers/flavor_profiles.py +37 -28
- octavia/api/v2/controllers/flavors.py +19 -15
- octavia/api/v2/controllers/health_monitor.py +44 -33
- octavia/api/v2/controllers/l7policy.py +52 -40
- octavia/api/v2/controllers/l7rule.py +68 -55
- octavia/api/v2/controllers/listener.py +88 -61
- octavia/api/v2/controllers/load_balancer.py +52 -34
- octavia/api/v2/controllers/member.py +63 -52
- octavia/api/v2/controllers/pool.py +55 -42
- octavia/api/v2/controllers/quotas.py +5 -3
- octavia/api/v2/types/listener.py +15 -0
- octavia/cmd/octavia_worker.py +0 -3
- octavia/cmd/status.py +1 -4
- octavia/common/clients.py +25 -45
- octavia/common/config.py +64 -22
- octavia/common/constants.py +3 -2
- octavia/common/data_models.py +7 -1
- octavia/common/jinja/haproxy/combined_listeners/jinja_cfg.py +12 -1
- octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 +5 -2
- octavia/common/jinja/lvs/jinja_cfg.py +4 -2
- octavia/common/keystone.py +58 -5
- octavia/common/validate.py +35 -0
- octavia/compute/drivers/noop_driver/driver.py +6 -0
- octavia/controller/healthmanager/health_manager.py +3 -6
- octavia/controller/housekeeping/house_keeping.py +36 -37
- octavia/controller/worker/amphora_rate_limit.py +5 -4
- octavia/controller/worker/task_utils.py +57 -41
- octavia/controller/worker/v2/controller_worker.py +160 -103
- octavia/controller/worker/v2/flows/listener_flows.py +3 -0
- octavia/controller/worker/v2/flows/load_balancer_flows.py +9 -14
- octavia/controller/worker/v2/tasks/amphora_driver_tasks.py +152 -91
- octavia/controller/worker/v2/tasks/compute_tasks.py +4 -2
- octavia/controller/worker/v2/tasks/database_tasks.py +542 -400
- octavia/controller/worker/v2/tasks/network_tasks.py +119 -79
- octavia/db/api.py +26 -23
- octavia/db/base_models.py +2 -2
- octavia/db/healthcheck.py +2 -1
- octavia/db/migration/alembic_migrations/versions/632152d2d32e_add_http_strict_transport_security_.py +42 -0
- octavia/db/models.py +12 -2
- octavia/db/prepare.py +2 -0
- octavia/db/repositories.py +462 -482
- octavia/hacking/checks.py +1 -1
- octavia/network/base.py +0 -14
- octavia/network/drivers/neutron/allowed_address_pairs.py +92 -135
- octavia/network/drivers/neutron/base.py +65 -77
- octavia/network/drivers/neutron/utils.py +69 -85
- octavia/network/drivers/noop_driver/driver.py +0 -7
- octavia/statistics/drivers/update_db.py +10 -10
- octavia/tests/common/constants.py +91 -84
- octavia/tests/common/sample_data_models.py +13 -1
- octavia/tests/fixtures.py +32 -0
- octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py +9 -10
- octavia/tests/functional/api/drivers/driver_agent/test_driver_agent.py +260 -15
- octavia/tests/functional/api/test_root_controller.py +3 -28
- octavia/tests/functional/api/v2/base.py +5 -3
- octavia/tests/functional/api/v2/test_amphora.py +18 -5
- octavia/tests/functional/api/v2/test_availability_zone_profiles.py +1 -0
- octavia/tests/functional/api/v2/test_listener.py +51 -19
- octavia/tests/functional/api/v2/test_load_balancer.py +10 -1
- octavia/tests/functional/db/base.py +31 -16
- octavia/tests/functional/db/test_models.py +27 -28
- octavia/tests/functional/db/test_repositories.py +407 -50
- octavia/tests/unit/amphorae/backends/agent/api_server/test_amphora_info.py +2 -0
- octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py +1 -1
- octavia/tests/unit/amphorae/backends/agent/api_server/test_plug.py +54 -6
- octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py +35 -0
- octavia/tests/unit/amphorae/backends/health_daemon/test_health_daemon.py +8 -0
- octavia/tests/unit/amphorae/backends/health_daemon/test_health_sender.py +18 -0
- octavia/tests/unit/amphorae/backends/utils/test_interface.py +81 -0
- octavia/tests/unit/amphorae/backends/utils/test_interface_file.py +2 -0
- octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py +129 -5
- octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py +42 -20
- octavia/tests/unit/amphorae/drivers/health/test_heartbeat_udp.py +18 -20
- octavia/tests/unit/amphorae/drivers/keepalived/jinja/test_jinja_cfg.py +4 -4
- octavia/tests/unit/amphorae/drivers/noop_driver/test_driver.py +4 -1
- octavia/tests/unit/api/drivers/driver_agent/test_driver_get.py +3 -3
- octavia/tests/unit/api/drivers/driver_agent/test_driver_updater.py +11 -13
- octavia/tests/unit/base.py +6 -0
- octavia/tests/unit/cmd/test_interface.py +2 -2
- octavia/tests/unit/cmd/test_status.py +2 -2
- octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py +152 -1
- octavia/tests/unit/common/sample_configs/sample_configs_combined.py +10 -3
- octavia/tests/unit/common/test_clients.py +0 -39
- octavia/tests/unit/common/test_keystone.py +54 -0
- octavia/tests/unit/common/test_validate.py +67 -0
- octavia/tests/unit/controller/healthmanager/test_health_manager.py +8 -22
- octavia/tests/unit/controller/housekeeping/test_house_keeping.py +3 -64
- octavia/tests/unit/controller/worker/test_amphora_rate_limit.py +1 -1
- octavia/tests/unit/controller/worker/test_task_utils.py +44 -24
- octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py +0 -1
- octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py +49 -26
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py +399 -196
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py +37 -64
- octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py +3 -14
- octavia/tests/unit/controller/worker/v2/test_controller_worker.py +2 -2
- octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py +456 -561
- octavia/tests/unit/network/drivers/neutron/test_base.py +181 -194
- octavia/tests/unit/network/drivers/neutron/test_utils.py +14 -30
- octavia/tests/unit/statistics/drivers/test_update_db.py +7 -5
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/README.rst +1 -1
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/AUTHORS +4 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/METADATA +4 -4
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/RECORD +141 -189
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/entry_points.txt +1 -2
- octavia-13.0.0.0rc1.dist-info/pbr.json +1 -0
- octavia/api/drivers/amphora_driver/v1/__init__.py +0 -11
- octavia/api/drivers/amphora_driver/v1/driver.py +0 -547
- octavia/controller/queue/v1/__init__.py +0 -11
- octavia/controller/queue/v1/consumer.py +0 -64
- octavia/controller/queue/v1/endpoints.py +0 -160
- octavia/controller/worker/v1/__init__.py +0 -11
- octavia/controller/worker/v1/controller_worker.py +0 -1157
- octavia/controller/worker/v1/flows/__init__.py +0 -11
- octavia/controller/worker/v1/flows/amphora_flows.py +0 -610
- octavia/controller/worker/v1/flows/health_monitor_flows.py +0 -105
- octavia/controller/worker/v1/flows/l7policy_flows.py +0 -94
- octavia/controller/worker/v1/flows/l7rule_flows.py +0 -100
- octavia/controller/worker/v1/flows/listener_flows.py +0 -128
- octavia/controller/worker/v1/flows/load_balancer_flows.py +0 -692
- octavia/controller/worker/v1/flows/member_flows.py +0 -230
- octavia/controller/worker/v1/flows/pool_flows.py +0 -127
- octavia/controller/worker/v1/tasks/__init__.py +0 -11
- octavia/controller/worker/v1/tasks/amphora_driver_tasks.py +0 -453
- octavia/controller/worker/v1/tasks/cert_task.py +0 -51
- octavia/controller/worker/v1/tasks/compute_tasks.py +0 -335
- octavia/controller/worker/v1/tasks/database_tasks.py +0 -2756
- octavia/controller/worker/v1/tasks/lifecycle_tasks.py +0 -173
- octavia/controller/worker/v1/tasks/model_tasks.py +0 -41
- octavia/controller/worker/v1/tasks/network_tasks.py +0 -970
- octavia/controller/worker/v1/tasks/retry_tasks.py +0 -74
- octavia/tests/unit/api/drivers/amphora_driver/v1/__init__.py +0 -11
- octavia/tests/unit/api/drivers/amphora_driver/v1/test_driver.py +0 -824
- octavia/tests/unit/controller/queue/v1/__init__.py +0 -11
- octavia/tests/unit/controller/queue/v1/test_consumer.py +0 -61
- octavia/tests/unit/controller/queue/v1/test_endpoints.py +0 -189
- octavia/tests/unit/controller/worker/v1/__init__.py +0 -11
- octavia/tests/unit/controller/worker/v1/flows/__init__.py +0 -11
- octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py +0 -474
- octavia/tests/unit/controller/worker/v1/flows/test_health_monitor_flows.py +0 -72
- octavia/tests/unit/controller/worker/v1/flows/test_l7policy_flows.py +0 -67
- octavia/tests/unit/controller/worker/v1/flows/test_l7rule_flows.py +0 -67
- octavia/tests/unit/controller/worker/v1/flows/test_listener_flows.py +0 -91
- octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py +0 -431
- octavia/tests/unit/controller/worker/v1/flows/test_member_flows.py +0 -106
- octavia/tests/unit/controller/worker/v1/flows/test_pool_flows.py +0 -77
- octavia/tests/unit/controller/worker/v1/tasks/__init__.py +0 -11
- octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py +0 -792
- octavia/tests/unit/controller/worker/v1/tasks/test_cert_task.py +0 -46
- octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py +0 -634
- octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py +0 -2615
- octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks_quota.py +0 -415
- octavia/tests/unit/controller/worker/v1/tasks/test_lifecycle_tasks.py +0 -401
- octavia/tests/unit/controller/worker/v1/tasks/test_model_tasks.py +0 -44
- octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py +0 -1788
- octavia/tests/unit/controller/worker/v1/tasks/test_retry_tasks.py +0 -47
- octavia/tests/unit/controller/worker/v1/test_controller_worker.py +0 -2096
- octavia-12.0.0.0rc2.dist-info/pbr.json +0 -1
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/LICENSE +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/README.rst +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/diskimage-create.sh +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/image-tests.sh +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/requirements.txt +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/test-requirements.txt +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/tox.ini +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/version.txt +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/scripts/octavia-wsgi +0 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/LICENSE +0 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/WHEEL +0 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/top_level.txt +0 -0
@@ -1,1157 +0,0 @@
|
|
1
|
-
# Copyright 2015 Hewlett-Packard Development Company, L.P.
|
2
|
-
#
|
3
|
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
4
|
-
# not use this file except in compliance with the License. You may obtain
|
5
|
-
# a copy of the License at
|
6
|
-
#
|
7
|
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
-
#
|
9
|
-
# Unless required by applicable law or agreed to in writing, software
|
10
|
-
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
11
|
-
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
12
|
-
# License for the specific language governing permissions and limitations
|
13
|
-
# under the License.
|
14
|
-
#
|
15
|
-
|
16
|
-
|
17
|
-
from oslo_config import cfg
|
18
|
-
from oslo_log import log as logging
|
19
|
-
from oslo_utils import excutils
|
20
|
-
from sqlalchemy.orm import exc as db_exceptions
|
21
|
-
from taskflow.listeners import logging as tf_logging
|
22
|
-
import tenacity
|
23
|
-
|
24
|
-
from octavia.common import base_taskflow
|
25
|
-
from octavia.common import constants
|
26
|
-
from octavia.common import exceptions
|
27
|
-
from octavia.common import utils
|
28
|
-
from octavia.controller.worker.v1.flows import amphora_flows
|
29
|
-
from octavia.controller.worker.v1.flows import health_monitor_flows
|
30
|
-
from octavia.controller.worker.v1.flows import l7policy_flows
|
31
|
-
from octavia.controller.worker.v1.flows import l7rule_flows
|
32
|
-
from octavia.controller.worker.v1.flows import listener_flows
|
33
|
-
from octavia.controller.worker.v1.flows import load_balancer_flows
|
34
|
-
from octavia.controller.worker.v1.flows import member_flows
|
35
|
-
from octavia.controller.worker.v1.flows import pool_flows
|
36
|
-
from octavia.db import api as db_apis
|
37
|
-
from octavia.db import repositories as repo
|
38
|
-
|
39
|
-
CONF = cfg.CONF
|
40
|
-
LOG = logging.getLogger(__name__)
|
41
|
-
|
42
|
-
|
43
|
-
def _is_provisioning_status_pending_update(lb_obj):
|
44
|
-
return not lb_obj.provisioning_status == constants.PENDING_UPDATE
|
45
|
-
|
46
|
-
|
47
|
-
class ControllerWorker(base_taskflow.BaseTaskFlowEngine):
|
48
|
-
|
49
|
-
def __init__(self):
|
50
|
-
|
51
|
-
LOG.warning("The 'amphorav1' provider is deprecated and will be "
|
52
|
-
"removed in a future release. Use the 'amphora' driver "
|
53
|
-
"instead.")
|
54
|
-
|
55
|
-
self._amphora_flows = amphora_flows.AmphoraFlows()
|
56
|
-
self._health_monitor_flows = health_monitor_flows.HealthMonitorFlows()
|
57
|
-
self._lb_flows = load_balancer_flows.LoadBalancerFlows()
|
58
|
-
self._listener_flows = listener_flows.ListenerFlows()
|
59
|
-
self._member_flows = member_flows.MemberFlows()
|
60
|
-
self._pool_flows = pool_flows.PoolFlows()
|
61
|
-
self._l7policy_flows = l7policy_flows.L7PolicyFlows()
|
62
|
-
self._l7rule_flows = l7rule_flows.L7RuleFlows()
|
63
|
-
|
64
|
-
self._amphora_repo = repo.AmphoraRepository()
|
65
|
-
self._amphora_health_repo = repo.AmphoraHealthRepository()
|
66
|
-
self._health_mon_repo = repo.HealthMonitorRepository()
|
67
|
-
self._lb_repo = repo.LoadBalancerRepository()
|
68
|
-
self._listener_repo = repo.ListenerRepository()
|
69
|
-
self._member_repo = repo.MemberRepository()
|
70
|
-
self._pool_repo = repo.PoolRepository()
|
71
|
-
self._l7policy_repo = repo.L7PolicyRepository()
|
72
|
-
self._l7rule_repo = repo.L7RuleRepository()
|
73
|
-
self._flavor_repo = repo.FlavorRepository()
|
74
|
-
self._az_repo = repo.AvailabilityZoneRepository()
|
75
|
-
|
76
|
-
super().__init__()
|
77
|
-
|
78
|
-
@tenacity.retry(
|
79
|
-
retry=(
|
80
|
-
tenacity.retry_if_result(_is_provisioning_status_pending_update) |
|
81
|
-
tenacity.retry_if_exception_type()),
|
82
|
-
wait=tenacity.wait_incrementing(
|
83
|
-
CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
|
84
|
-
CONF.haproxy_amphora.api_db_commit_retry_backoff,
|
85
|
-
CONF.haproxy_amphora.api_db_commit_retry_max),
|
86
|
-
stop=tenacity.stop_after_attempt(
|
87
|
-
CONF.haproxy_amphora.api_db_commit_retry_attempts))
|
88
|
-
def _get_db_obj_until_pending_update(self, repo, id):
|
89
|
-
|
90
|
-
return repo.get(db_apis.get_session(), id=id)
|
91
|
-
|
92
|
-
def delete_amphora(self, amphora_id):
|
93
|
-
"""Deletes an existing Amphora.
|
94
|
-
|
95
|
-
:param amphora_id: ID of the amphora to delete
|
96
|
-
:returns: None
|
97
|
-
:raises AmphoraNotFound: The referenced Amphora was not found
|
98
|
-
"""
|
99
|
-
try:
|
100
|
-
amphora = self._amphora_repo.get(db_apis.get_session(),
|
101
|
-
id=amphora_id)
|
102
|
-
delete_amp_tf = self.taskflow_load(
|
103
|
-
self._amphora_flows.get_delete_amphora_flow(amphora))
|
104
|
-
with tf_logging.DynamicLoggingListener(delete_amp_tf, log=LOG):
|
105
|
-
delete_amp_tf.run()
|
106
|
-
except Exception as e:
|
107
|
-
LOG.error('Failed to delete a amphora %s due to: %s',
|
108
|
-
amphora_id, str(e))
|
109
|
-
return
|
110
|
-
LOG.info('Finished deleting amphora %s.', amphora_id)
|
111
|
-
|
112
|
-
@tenacity.retry(
|
113
|
-
retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
|
114
|
-
wait=tenacity.wait_incrementing(
|
115
|
-
CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
|
116
|
-
CONF.haproxy_amphora.api_db_commit_retry_backoff,
|
117
|
-
CONF.haproxy_amphora.api_db_commit_retry_max),
|
118
|
-
stop=tenacity.stop_after_attempt(
|
119
|
-
CONF.haproxy_amphora.api_db_commit_retry_attempts))
|
120
|
-
def create_health_monitor(self, health_monitor_id):
|
121
|
-
"""Creates a health monitor.
|
122
|
-
|
123
|
-
:param pool_id: ID of the pool to create a health monitor on
|
124
|
-
:returns: None
|
125
|
-
:raises NoResultFound: Unable to find the object
|
126
|
-
"""
|
127
|
-
health_mon = self._health_mon_repo.get(db_apis.get_session(),
|
128
|
-
id=health_monitor_id)
|
129
|
-
if not health_mon:
|
130
|
-
LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
|
131
|
-
'60 seconds.', 'health_monitor', health_monitor_id)
|
132
|
-
raise db_exceptions.NoResultFound
|
133
|
-
|
134
|
-
pool = health_mon.pool
|
135
|
-
listeners = pool.listeners
|
136
|
-
pool.health_monitor = health_mon
|
137
|
-
load_balancer = pool.load_balancer
|
138
|
-
|
139
|
-
create_hm_tf = self.taskflow_load(
|
140
|
-
self._health_monitor_flows.get_create_health_monitor_flow(),
|
141
|
-
store={constants.HEALTH_MON: health_mon,
|
142
|
-
constants.POOL: pool,
|
143
|
-
constants.LISTENERS: listeners,
|
144
|
-
constants.LOADBALANCER: load_balancer})
|
145
|
-
with tf_logging.DynamicLoggingListener(create_hm_tf,
|
146
|
-
log=LOG):
|
147
|
-
create_hm_tf.run()
|
148
|
-
|
149
|
-
def delete_health_monitor(self, health_monitor_id):
|
150
|
-
"""Deletes a health monitor.
|
151
|
-
|
152
|
-
:param pool_id: ID of the pool to delete its health monitor
|
153
|
-
:returns: None
|
154
|
-
:raises HMNotFound: The referenced health monitor was not found
|
155
|
-
"""
|
156
|
-
health_mon = self._health_mon_repo.get(db_apis.get_session(),
|
157
|
-
id=health_monitor_id)
|
158
|
-
|
159
|
-
pool = health_mon.pool
|
160
|
-
listeners = pool.listeners
|
161
|
-
load_balancer = pool.load_balancer
|
162
|
-
|
163
|
-
delete_hm_tf = self.taskflow_load(
|
164
|
-
self._health_monitor_flows.get_delete_health_monitor_flow(),
|
165
|
-
store={constants.HEALTH_MON: health_mon,
|
166
|
-
constants.POOL: pool,
|
167
|
-
constants.LISTENERS: listeners,
|
168
|
-
constants.LOADBALANCER: load_balancer})
|
169
|
-
with tf_logging.DynamicLoggingListener(delete_hm_tf,
|
170
|
-
log=LOG):
|
171
|
-
delete_hm_tf.run()
|
172
|
-
|
173
|
-
def update_health_monitor(self, health_monitor_id, health_monitor_updates):
|
174
|
-
"""Updates a health monitor.
|
175
|
-
|
176
|
-
:param pool_id: ID of the pool to have it's health monitor updated
|
177
|
-
:param health_monitor_updates: Dict containing updated health monitor
|
178
|
-
:returns: None
|
179
|
-
:raises HMNotFound: The referenced health monitor was not found
|
180
|
-
"""
|
181
|
-
health_mon = None
|
182
|
-
try:
|
183
|
-
health_mon = self._get_db_obj_until_pending_update(
|
184
|
-
self._health_mon_repo, health_monitor_id)
|
185
|
-
except tenacity.RetryError as e:
|
186
|
-
LOG.warning('Health monitor did not go into %s in 60 seconds. '
|
187
|
-
'This either due to an in-progress Octavia upgrade '
|
188
|
-
'or an overloaded and failing database. Assuming '
|
189
|
-
'an upgrade is in progress and continuing.',
|
190
|
-
constants.PENDING_UPDATE)
|
191
|
-
health_mon = e.last_attempt.result()
|
192
|
-
|
193
|
-
pool = health_mon.pool
|
194
|
-
listeners = pool.listeners
|
195
|
-
pool.health_monitor = health_mon
|
196
|
-
load_balancer = pool.load_balancer
|
197
|
-
|
198
|
-
update_hm_tf = self.taskflow_load(
|
199
|
-
self._health_monitor_flows.get_update_health_monitor_flow(),
|
200
|
-
store={constants.HEALTH_MON: health_mon,
|
201
|
-
constants.POOL: pool,
|
202
|
-
constants.LISTENERS: listeners,
|
203
|
-
constants.LOADBALANCER: load_balancer,
|
204
|
-
constants.UPDATE_DICT: health_monitor_updates})
|
205
|
-
with tf_logging.DynamicLoggingListener(update_hm_tf,
|
206
|
-
log=LOG):
|
207
|
-
update_hm_tf.run()
|
208
|
-
|
209
|
-
@tenacity.retry(
|
210
|
-
retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
|
211
|
-
wait=tenacity.wait_incrementing(
|
212
|
-
CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
|
213
|
-
CONF.haproxy_amphora.api_db_commit_retry_backoff,
|
214
|
-
CONF.haproxy_amphora.api_db_commit_retry_max),
|
215
|
-
stop=tenacity.stop_after_attempt(
|
216
|
-
CONF.haproxy_amphora.api_db_commit_retry_attempts))
|
217
|
-
def create_listener(self, listener_id):
|
218
|
-
"""Creates a listener.
|
219
|
-
|
220
|
-
:param listener_id: ID of the listener to create
|
221
|
-
:returns: None
|
222
|
-
:raises NoResultFound: Unable to find the object
|
223
|
-
"""
|
224
|
-
listener = self._listener_repo.get(db_apis.get_session(),
|
225
|
-
id=listener_id)
|
226
|
-
if not listener:
|
227
|
-
LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
|
228
|
-
'60 seconds.', 'listener', listener_id)
|
229
|
-
raise db_exceptions.NoResultFound
|
230
|
-
|
231
|
-
load_balancer = listener.load_balancer
|
232
|
-
listeners = load_balancer.listeners
|
233
|
-
|
234
|
-
create_listener_tf = self.taskflow_load(self._listener_flows.
|
235
|
-
get_create_listener_flow(),
|
236
|
-
store={constants.LOADBALANCER:
|
237
|
-
load_balancer,
|
238
|
-
constants.LISTENERS:
|
239
|
-
listeners})
|
240
|
-
with tf_logging.DynamicLoggingListener(create_listener_tf,
|
241
|
-
log=LOG):
|
242
|
-
create_listener_tf.run()
|
243
|
-
|
244
|
-
def delete_listener(self, listener_id):
|
245
|
-
"""Deletes a listener.
|
246
|
-
|
247
|
-
:param listener_id: ID of the listener to delete
|
248
|
-
:returns: None
|
249
|
-
:raises ListenerNotFound: The referenced listener was not found
|
250
|
-
"""
|
251
|
-
listener = self._listener_repo.get(db_apis.get_session(),
|
252
|
-
id=listener_id)
|
253
|
-
load_balancer = listener.load_balancer
|
254
|
-
|
255
|
-
delete_listener_tf = self.taskflow_load(
|
256
|
-
self._listener_flows.get_delete_listener_flow(),
|
257
|
-
store={constants.LOADBALANCER: load_balancer,
|
258
|
-
constants.LISTENER: listener})
|
259
|
-
with tf_logging.DynamicLoggingListener(delete_listener_tf,
|
260
|
-
log=LOG):
|
261
|
-
delete_listener_tf.run()
|
262
|
-
|
263
|
-
def update_listener(self, listener_id, listener_updates):
|
264
|
-
"""Updates a listener.
|
265
|
-
|
266
|
-
:param listener_id: ID of the listener to update
|
267
|
-
:param listener_updates: Dict containing updated listener attributes
|
268
|
-
:returns: None
|
269
|
-
:raises ListenerNotFound: The referenced listener was not found
|
270
|
-
"""
|
271
|
-
listener = None
|
272
|
-
try:
|
273
|
-
listener = self._get_db_obj_until_pending_update(
|
274
|
-
self._listener_repo, listener_id)
|
275
|
-
except tenacity.RetryError as e:
|
276
|
-
LOG.warning('Listener did not go into %s in 60 seconds. '
|
277
|
-
'This either due to an in-progress Octavia upgrade '
|
278
|
-
'or an overloaded and failing database. Assuming '
|
279
|
-
'an upgrade is in progress and continuing.',
|
280
|
-
constants.PENDING_UPDATE)
|
281
|
-
listener = e.last_attempt.result()
|
282
|
-
|
283
|
-
load_balancer = listener.load_balancer
|
284
|
-
|
285
|
-
update_listener_tf = self.taskflow_load(self._listener_flows.
|
286
|
-
get_update_listener_flow(),
|
287
|
-
store={constants.LISTENER:
|
288
|
-
listener,
|
289
|
-
constants.LOADBALANCER:
|
290
|
-
load_balancer,
|
291
|
-
constants.UPDATE_DICT:
|
292
|
-
listener_updates,
|
293
|
-
constants.LISTENERS:
|
294
|
-
[listener]})
|
295
|
-
with tf_logging.DynamicLoggingListener(update_listener_tf, log=LOG):
|
296
|
-
update_listener_tf.run()
|
297
|
-
|
298
|
-
@tenacity.retry(
|
299
|
-
retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
|
300
|
-
wait=tenacity.wait_incrementing(
|
301
|
-
CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
|
302
|
-
CONF.haproxy_amphora.api_db_commit_retry_backoff,
|
303
|
-
CONF.haproxy_amphora.api_db_commit_retry_max),
|
304
|
-
stop=tenacity.stop_after_attempt(
|
305
|
-
CONF.haproxy_amphora.api_db_commit_retry_attempts))
|
306
|
-
def create_load_balancer(self, load_balancer_id, flavor=None,
|
307
|
-
availability_zone=None):
|
308
|
-
"""Creates a load balancer by allocating Amphorae.
|
309
|
-
|
310
|
-
First tries to allocate an existing Amphora in READY state.
|
311
|
-
If none are available it will attempt to build one specifically
|
312
|
-
for this load balancer.
|
313
|
-
|
314
|
-
:param load_balancer_id: ID of the load balancer to create
|
315
|
-
:returns: None
|
316
|
-
:raises NoResultFound: Unable to find the object
|
317
|
-
"""
|
318
|
-
lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id)
|
319
|
-
if not lb:
|
320
|
-
LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
|
321
|
-
'60 seconds.', 'load_balancer', load_balancer_id)
|
322
|
-
raise db_exceptions.NoResultFound
|
323
|
-
|
324
|
-
# TODO(johnsom) convert this to octavia_lib constant flavor
|
325
|
-
# once octavia is transitioned to use octavia_lib
|
326
|
-
store = {constants.LOADBALANCER_ID: load_balancer_id,
|
327
|
-
constants.BUILD_TYPE_PRIORITY:
|
328
|
-
constants.LB_CREATE_NORMAL_PRIORITY,
|
329
|
-
constants.FLAVOR: flavor,
|
330
|
-
constants.AVAILABILITY_ZONE: availability_zone}
|
331
|
-
|
332
|
-
topology = lb.topology
|
333
|
-
|
334
|
-
if (not CONF.nova.enable_anti_affinity or
|
335
|
-
topology == constants.TOPOLOGY_SINGLE):
|
336
|
-
store[constants.SERVER_GROUP_ID] = None
|
337
|
-
|
338
|
-
store[constants.UPDATE_DICT] = {
|
339
|
-
constants.TOPOLOGY: topology
|
340
|
-
}
|
341
|
-
|
342
|
-
create_lb_flow = self._lb_flows.get_create_load_balancer_flow(
|
343
|
-
topology=topology, listeners=lb.listeners)
|
344
|
-
|
345
|
-
create_lb_tf = self.taskflow_load(create_lb_flow, store=store)
|
346
|
-
with tf_logging.DynamicLoggingListener(create_lb_tf, log=LOG):
|
347
|
-
create_lb_tf.run()
|
348
|
-
|
349
|
-
def delete_load_balancer(self, load_balancer_id, cascade=False):
|
350
|
-
"""Deletes a load balancer by de-allocating Amphorae.
|
351
|
-
|
352
|
-
:param load_balancer_id: ID of the load balancer to delete
|
353
|
-
:returns: None
|
354
|
-
:raises LBNotFound: The referenced load balancer was not found
|
355
|
-
"""
|
356
|
-
lb = self._lb_repo.get(db_apis.get_session(),
|
357
|
-
id=load_balancer_id)
|
358
|
-
|
359
|
-
if cascade:
|
360
|
-
(flow,
|
361
|
-
store) = self._lb_flows.get_cascade_delete_load_balancer_flow(lb)
|
362
|
-
else:
|
363
|
-
(flow, store) = self._lb_flows.get_delete_load_balancer_flow(lb)
|
364
|
-
store.update({constants.LOADBALANCER: lb,
|
365
|
-
constants.SERVER_GROUP_ID: lb.server_group_id})
|
366
|
-
delete_lb_tf = self.taskflow_load(flow, store=store)
|
367
|
-
|
368
|
-
with tf_logging.DynamicLoggingListener(delete_lb_tf,
|
369
|
-
log=LOG):
|
370
|
-
delete_lb_tf.run()
|
371
|
-
|
372
|
-
def update_load_balancer(self, load_balancer_id, load_balancer_updates):
|
373
|
-
"""Updates a load balancer.
|
374
|
-
|
375
|
-
:param load_balancer_id: ID of the load balancer to update
|
376
|
-
:param load_balancer_updates: Dict containing updated load balancer
|
377
|
-
:returns: None
|
378
|
-
:raises LBNotFound: The referenced load balancer was not found
|
379
|
-
"""
|
380
|
-
lb = None
|
381
|
-
try:
|
382
|
-
lb = self._get_db_obj_until_pending_update(
|
383
|
-
self._lb_repo, load_balancer_id)
|
384
|
-
except tenacity.RetryError as e:
|
385
|
-
LOG.warning('Load balancer did not go into %s in 60 seconds. '
|
386
|
-
'This either due to an in-progress Octavia upgrade '
|
387
|
-
'or an overloaded and failing database. Assuming '
|
388
|
-
'an upgrade is in progress and continuing.',
|
389
|
-
constants.PENDING_UPDATE)
|
390
|
-
lb = e.last_attempt.result()
|
391
|
-
|
392
|
-
listeners, _ = self._listener_repo.get_all(
|
393
|
-
db_apis.get_session(),
|
394
|
-
load_balancer_id=load_balancer_id)
|
395
|
-
|
396
|
-
update_lb_tf = self.taskflow_load(
|
397
|
-
self._lb_flows.get_update_load_balancer_flow(),
|
398
|
-
store={constants.LOADBALANCER: lb,
|
399
|
-
constants.LISTENERS: listeners,
|
400
|
-
constants.UPDATE_DICT: load_balancer_updates})
|
401
|
-
|
402
|
-
with tf_logging.DynamicLoggingListener(update_lb_tf,
|
403
|
-
log=LOG):
|
404
|
-
update_lb_tf.run()
|
405
|
-
|
406
|
-
@tenacity.retry(
|
407
|
-
retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
|
408
|
-
wait=tenacity.wait_incrementing(
|
409
|
-
CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
|
410
|
-
CONF.haproxy_amphora.api_db_commit_retry_backoff,
|
411
|
-
CONF.haproxy_amphora.api_db_commit_retry_max),
|
412
|
-
stop=tenacity.stop_after_attempt(
|
413
|
-
CONF.haproxy_amphora.api_db_commit_retry_attempts))
|
414
|
-
def create_member(self, member_id):
|
415
|
-
"""Creates a pool member.
|
416
|
-
|
417
|
-
:param member_id: ID of the member to create
|
418
|
-
:returns: None
|
419
|
-
:raises NoSuitablePool: Unable to find the node pool
|
420
|
-
"""
|
421
|
-
member = self._member_repo.get(db_apis.get_session(),
|
422
|
-
id=member_id)
|
423
|
-
if not member:
|
424
|
-
LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
|
425
|
-
'60 seconds.', 'member', member_id)
|
426
|
-
raise db_exceptions.NoResultFound
|
427
|
-
|
428
|
-
pool = member.pool
|
429
|
-
listeners = pool.listeners
|
430
|
-
load_balancer = pool.load_balancer
|
431
|
-
|
432
|
-
store = {
|
433
|
-
constants.MEMBER: member,
|
434
|
-
constants.LISTENERS: listeners,
|
435
|
-
constants.LOADBALANCER: load_balancer,
|
436
|
-
constants.LOADBALANCER_ID: load_balancer.id,
|
437
|
-
constants.POOL: pool}
|
438
|
-
if load_balancer.availability_zone:
|
439
|
-
store[constants.AVAILABILITY_ZONE] = (
|
440
|
-
self._az_repo.get_availability_zone_metadata_dict(
|
441
|
-
db_apis.get_session(), load_balancer.availability_zone))
|
442
|
-
else:
|
443
|
-
store[constants.AVAILABILITY_ZONE] = {}
|
444
|
-
|
445
|
-
create_member_tf = self.taskflow_load(
|
446
|
-
self._member_flows.get_create_member_flow(),
|
447
|
-
store=store)
|
448
|
-
with tf_logging.DynamicLoggingListener(create_member_tf,
|
449
|
-
log=LOG):
|
450
|
-
create_member_tf.run()
|
451
|
-
|
452
|
-
def delete_member(self, member_id):
|
453
|
-
"""Deletes a pool member.
|
454
|
-
|
455
|
-
:param member_id: ID of the member to delete
|
456
|
-
:returns: None
|
457
|
-
:raises MemberNotFound: The referenced member was not found
|
458
|
-
"""
|
459
|
-
member = self._member_repo.get(db_apis.get_session(),
|
460
|
-
id=member_id)
|
461
|
-
pool = member.pool
|
462
|
-
listeners = pool.listeners
|
463
|
-
load_balancer = pool.load_balancer
|
464
|
-
|
465
|
-
store = {
|
466
|
-
constants.MEMBER: member,
|
467
|
-
constants.LISTENERS: listeners,
|
468
|
-
constants.LOADBALANCER: load_balancer,
|
469
|
-
constants.LOADBALANCER_ID: load_balancer.id,
|
470
|
-
constants.POOL: pool}
|
471
|
-
if load_balancer.availability_zone:
|
472
|
-
store[constants.AVAILABILITY_ZONE] = (
|
473
|
-
self._az_repo.get_availability_zone_metadata_dict(
|
474
|
-
db_apis.get_session(), load_balancer.availability_zone))
|
475
|
-
else:
|
476
|
-
store[constants.AVAILABILITY_ZONE] = {}
|
477
|
-
|
478
|
-
delete_member_tf = self.taskflow_load(
|
479
|
-
self._member_flows.get_delete_member_flow(),
|
480
|
-
store=store
|
481
|
-
)
|
482
|
-
with tf_logging.DynamicLoggingListener(delete_member_tf,
|
483
|
-
log=LOG):
|
484
|
-
delete_member_tf.run()
|
485
|
-
|
486
|
-
@tenacity.retry(
|
487
|
-
retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
|
488
|
-
wait=tenacity.wait_incrementing(
|
489
|
-
CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
|
490
|
-
CONF.haproxy_amphora.api_db_commit_retry_backoff,
|
491
|
-
CONF.haproxy_amphora.api_db_commit_retry_max),
|
492
|
-
stop=tenacity.stop_after_attempt(
|
493
|
-
CONF.haproxy_amphora.api_db_commit_retry_attempts))
|
494
|
-
def batch_update_members(self, old_member_ids, new_member_ids,
|
495
|
-
updated_members):
|
496
|
-
new_members = [self._member_repo.get(db_apis.get_session(), id=mid)
|
497
|
-
for mid in new_member_ids]
|
498
|
-
# The API may not have commited all of the new member records yet.
|
499
|
-
# Make sure we retry looking them up.
|
500
|
-
if None in new_members or len(new_members) != len(new_member_ids):
|
501
|
-
LOG.warning('Failed to fetch one of the new members from DB. '
|
502
|
-
'Retrying for up to 60 seconds.')
|
503
|
-
raise db_exceptions.NoResultFound
|
504
|
-
old_members = [self._member_repo.get(db_apis.get_session(), id=mid)
|
505
|
-
for mid in old_member_ids]
|
506
|
-
updated_members = [
|
507
|
-
(self._member_repo.get(db_apis.get_session(), id=m.get('id')), m)
|
508
|
-
for m in updated_members]
|
509
|
-
if old_members:
|
510
|
-
pool = old_members[0].pool
|
511
|
-
elif new_members:
|
512
|
-
pool = new_members[0].pool
|
513
|
-
else:
|
514
|
-
pool = updated_members[0][0].pool
|
515
|
-
listeners = pool.listeners
|
516
|
-
load_balancer = pool.load_balancer
|
517
|
-
|
518
|
-
store = {
|
519
|
-
constants.LISTENERS: listeners,
|
520
|
-
constants.LOADBALANCER: load_balancer,
|
521
|
-
constants.LOADBALANCER_ID: load_balancer.id,
|
522
|
-
constants.POOL: pool}
|
523
|
-
if load_balancer.availability_zone:
|
524
|
-
store[constants.AVAILABILITY_ZONE] = (
|
525
|
-
self._az_repo.get_availability_zone_metadata_dict(
|
526
|
-
db_apis.get_session(), load_balancer.availability_zone))
|
527
|
-
else:
|
528
|
-
store[constants.AVAILABILITY_ZONE] = {}
|
529
|
-
|
530
|
-
batch_update_members_tf = self.taskflow_load(
|
531
|
-
self._member_flows.get_batch_update_members_flow(
|
532
|
-
old_members, new_members, updated_members),
|
533
|
-
store=store)
|
534
|
-
with tf_logging.DynamicLoggingListener(batch_update_members_tf,
|
535
|
-
log=LOG):
|
536
|
-
batch_update_members_tf.run()
|
537
|
-
|
538
|
-
def update_member(self, member_id, member_updates):
|
539
|
-
"""Updates a pool member.
|
540
|
-
|
541
|
-
:param member_id: ID of the member to update
|
542
|
-
:param member_updates: Dict containing updated member attributes
|
543
|
-
:returns: None
|
544
|
-
:raises MemberNotFound: The referenced member was not found
|
545
|
-
"""
|
546
|
-
try:
|
547
|
-
member = self._get_db_obj_until_pending_update(
|
548
|
-
self._member_repo, member_id)
|
549
|
-
except tenacity.RetryError as e:
|
550
|
-
LOG.warning('Member did not go into %s in 60 seconds. '
|
551
|
-
'This either due to an in-progress Octavia upgrade '
|
552
|
-
'or an overloaded and failing database. Assuming '
|
553
|
-
'an upgrade is in progress and continuing.',
|
554
|
-
constants.PENDING_UPDATE)
|
555
|
-
member = e.last_attempt.result()
|
556
|
-
|
557
|
-
pool = member.pool
|
558
|
-
listeners = pool.listeners
|
559
|
-
load_balancer = pool.load_balancer
|
560
|
-
|
561
|
-
store = {
|
562
|
-
constants.MEMBER: member,
|
563
|
-
constants.LISTENERS: listeners,
|
564
|
-
constants.LOADBALANCER: load_balancer,
|
565
|
-
constants.POOL: pool,
|
566
|
-
constants.UPDATE_DICT: member_updates}
|
567
|
-
if load_balancer.availability_zone:
|
568
|
-
store[constants.AVAILABILITY_ZONE] = (
|
569
|
-
self._az_repo.get_availability_zone_metadata_dict(
|
570
|
-
db_apis.get_session(), load_balancer.availability_zone))
|
571
|
-
else:
|
572
|
-
store[constants.AVAILABILITY_ZONE] = {}
|
573
|
-
|
574
|
-
update_member_tf = self.taskflow_load(
|
575
|
-
self._member_flows.get_update_member_flow(),
|
576
|
-
store=store)
|
577
|
-
with tf_logging.DynamicLoggingListener(update_member_tf,
|
578
|
-
log=LOG):
|
579
|
-
update_member_tf.run()
|
580
|
-
|
581
|
-
@tenacity.retry(
|
582
|
-
retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
|
583
|
-
wait=tenacity.wait_incrementing(
|
584
|
-
CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
|
585
|
-
CONF.haproxy_amphora.api_db_commit_retry_backoff,
|
586
|
-
CONF.haproxy_amphora.api_db_commit_retry_max),
|
587
|
-
stop=tenacity.stop_after_attempt(
|
588
|
-
CONF.haproxy_amphora.api_db_commit_retry_attempts))
|
589
|
-
def create_pool(self, pool_id):
|
590
|
-
"""Creates a node pool.
|
591
|
-
|
592
|
-
:param pool_id: ID of the pool to create
|
593
|
-
:returns: None
|
594
|
-
:raises NoResultFound: Unable to find the object
|
595
|
-
"""
|
596
|
-
pool = self._pool_repo.get(db_apis.get_session(),
|
597
|
-
id=pool_id)
|
598
|
-
if not pool:
|
599
|
-
LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
|
600
|
-
'60 seconds.', 'pool', pool_id)
|
601
|
-
raise db_exceptions.NoResultFound
|
602
|
-
|
603
|
-
listeners = pool.listeners
|
604
|
-
load_balancer = pool.load_balancer
|
605
|
-
|
606
|
-
create_pool_tf = self.taskflow_load(self._pool_flows.
|
607
|
-
get_create_pool_flow(),
|
608
|
-
store={constants.POOL: pool,
|
609
|
-
constants.LISTENERS:
|
610
|
-
listeners,
|
611
|
-
constants.LOADBALANCER:
|
612
|
-
load_balancer})
|
613
|
-
with tf_logging.DynamicLoggingListener(create_pool_tf,
|
614
|
-
log=LOG):
|
615
|
-
create_pool_tf.run()
|
616
|
-
|
617
|
-
def delete_pool(self, pool_id):
|
618
|
-
"""Deletes a node pool.
|
619
|
-
|
620
|
-
:param pool_id: ID of the pool to delete
|
621
|
-
:returns: None
|
622
|
-
:raises PoolNotFound: The referenced pool was not found
|
623
|
-
"""
|
624
|
-
pool = self._pool_repo.get(db_apis.get_session(),
|
625
|
-
id=pool_id)
|
626
|
-
|
627
|
-
load_balancer = pool.load_balancer
|
628
|
-
listeners = pool.listeners
|
629
|
-
|
630
|
-
delete_pool_tf = self.taskflow_load(
|
631
|
-
self._pool_flows.get_delete_pool_flow(),
|
632
|
-
store={constants.POOL: pool, constants.LISTENERS: listeners,
|
633
|
-
constants.LOADBALANCER: load_balancer})
|
634
|
-
with tf_logging.DynamicLoggingListener(delete_pool_tf,
|
635
|
-
log=LOG):
|
636
|
-
delete_pool_tf.run()
|
637
|
-
|
638
|
-
def update_pool(self, pool_id, pool_updates):
|
639
|
-
"""Updates a node pool.
|
640
|
-
|
641
|
-
:param pool_id: ID of the pool to update
|
642
|
-
:param pool_updates: Dict containing updated pool attributes
|
643
|
-
:returns: None
|
644
|
-
:raises PoolNotFound: The referenced pool was not found
|
645
|
-
"""
|
646
|
-
pool = None
|
647
|
-
try:
|
648
|
-
pool = self._get_db_obj_until_pending_update(
|
649
|
-
self._pool_repo, pool_id)
|
650
|
-
except tenacity.RetryError as e:
|
651
|
-
LOG.warning('Pool did not go into %s in 60 seconds. '
|
652
|
-
'This either due to an in-progress Octavia upgrade '
|
653
|
-
'or an overloaded and failing database. Assuming '
|
654
|
-
'an upgrade is in progress and continuing.',
|
655
|
-
constants.PENDING_UPDATE)
|
656
|
-
pool = e.last_attempt.result()
|
657
|
-
|
658
|
-
listeners = pool.listeners
|
659
|
-
load_balancer = pool.load_balancer
|
660
|
-
|
661
|
-
update_pool_tf = self.taskflow_load(self._pool_flows.
|
662
|
-
get_update_pool_flow(),
|
663
|
-
store={constants.POOL: pool,
|
664
|
-
constants.LISTENERS:
|
665
|
-
listeners,
|
666
|
-
constants.LOADBALANCER:
|
667
|
-
load_balancer,
|
668
|
-
constants.UPDATE_DICT:
|
669
|
-
pool_updates})
|
670
|
-
with tf_logging.DynamicLoggingListener(update_pool_tf,
|
671
|
-
log=LOG):
|
672
|
-
update_pool_tf.run()
|
673
|
-
|
674
|
-
@tenacity.retry(
|
675
|
-
retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
|
676
|
-
wait=tenacity.wait_incrementing(
|
677
|
-
CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
|
678
|
-
CONF.haproxy_amphora.api_db_commit_retry_backoff,
|
679
|
-
CONF.haproxy_amphora.api_db_commit_retry_max),
|
680
|
-
stop=tenacity.stop_after_attempt(
|
681
|
-
CONF.haproxy_amphora.api_db_commit_retry_attempts))
|
682
|
-
def create_l7policy(self, l7policy_id):
|
683
|
-
"""Creates an L7 Policy.
|
684
|
-
|
685
|
-
:param l7policy_id: ID of the l7policy to create
|
686
|
-
:returns: None
|
687
|
-
:raises NoResultFound: Unable to find the object
|
688
|
-
"""
|
689
|
-
l7policy = self._l7policy_repo.get(db_apis.get_session(),
|
690
|
-
id=l7policy_id)
|
691
|
-
if not l7policy:
|
692
|
-
LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
|
693
|
-
'60 seconds.', 'l7policy', l7policy_id)
|
694
|
-
raise db_exceptions.NoResultFound
|
695
|
-
|
696
|
-
listeners = [l7policy.listener]
|
697
|
-
load_balancer = l7policy.listener.load_balancer
|
698
|
-
|
699
|
-
create_l7policy_tf = self.taskflow_load(
|
700
|
-
self._l7policy_flows.get_create_l7policy_flow(),
|
701
|
-
store={constants.L7POLICY: l7policy,
|
702
|
-
constants.LISTENERS: listeners,
|
703
|
-
constants.LOADBALANCER: load_balancer})
|
704
|
-
with tf_logging.DynamicLoggingListener(create_l7policy_tf,
|
705
|
-
log=LOG):
|
706
|
-
create_l7policy_tf.run()
|
707
|
-
|
708
|
-
def delete_l7policy(self, l7policy_id):
|
709
|
-
"""Deletes an L7 policy.
|
710
|
-
|
711
|
-
:param l7policy_id: ID of the l7policy to delete
|
712
|
-
:returns: None
|
713
|
-
:raises L7PolicyNotFound: The referenced l7policy was not found
|
714
|
-
"""
|
715
|
-
l7policy = self._l7policy_repo.get(db_apis.get_session(),
|
716
|
-
id=l7policy_id)
|
717
|
-
|
718
|
-
load_balancer = l7policy.listener.load_balancer
|
719
|
-
listeners = [l7policy.listener]
|
720
|
-
|
721
|
-
delete_l7policy_tf = self.taskflow_load(
|
722
|
-
self._l7policy_flows.get_delete_l7policy_flow(),
|
723
|
-
store={constants.L7POLICY: l7policy,
|
724
|
-
constants.LISTENERS: listeners,
|
725
|
-
constants.LOADBALANCER: load_balancer})
|
726
|
-
with tf_logging.DynamicLoggingListener(delete_l7policy_tf,
|
727
|
-
log=LOG):
|
728
|
-
delete_l7policy_tf.run()
|
729
|
-
|
730
|
-
def update_l7policy(self, l7policy_id, l7policy_updates):
|
731
|
-
"""Updates an L7 policy.
|
732
|
-
|
733
|
-
:param l7policy_id: ID of the l7policy to update
|
734
|
-
:param l7policy_updates: Dict containing updated l7policy attributes
|
735
|
-
:returns: None
|
736
|
-
:raises L7PolicyNotFound: The referenced l7policy was not found
|
737
|
-
"""
|
738
|
-
l7policy = None
|
739
|
-
try:
|
740
|
-
l7policy = self._get_db_obj_until_pending_update(
|
741
|
-
self._l7policy_repo, l7policy_id)
|
742
|
-
except tenacity.RetryError as e:
|
743
|
-
LOG.warning('L7 policy did not go into %s in 60 seconds. '
|
744
|
-
'This either due to an in-progress Octavia upgrade '
|
745
|
-
'or an overloaded and failing database. Assuming '
|
746
|
-
'an upgrade is in progress and continuing.',
|
747
|
-
constants.PENDING_UPDATE)
|
748
|
-
l7policy = e.last_attempt.result()
|
749
|
-
|
750
|
-
listeners = [l7policy.listener]
|
751
|
-
load_balancer = l7policy.listener.load_balancer
|
752
|
-
|
753
|
-
update_l7policy_tf = self.taskflow_load(
|
754
|
-
self._l7policy_flows.get_update_l7policy_flow(),
|
755
|
-
store={constants.L7POLICY: l7policy,
|
756
|
-
constants.LISTENERS: listeners,
|
757
|
-
constants.LOADBALANCER: load_balancer,
|
758
|
-
constants.UPDATE_DICT: l7policy_updates})
|
759
|
-
with tf_logging.DynamicLoggingListener(update_l7policy_tf,
|
760
|
-
log=LOG):
|
761
|
-
update_l7policy_tf.run()
|
762
|
-
|
763
|
-
@tenacity.retry(
|
764
|
-
retry=tenacity.retry_if_exception_type(db_exceptions.NoResultFound),
|
765
|
-
wait=tenacity.wait_incrementing(
|
766
|
-
CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
|
767
|
-
CONF.haproxy_amphora.api_db_commit_retry_backoff,
|
768
|
-
CONF.haproxy_amphora.api_db_commit_retry_max),
|
769
|
-
stop=tenacity.stop_after_attempt(
|
770
|
-
CONF.haproxy_amphora.api_db_commit_retry_attempts))
|
771
|
-
def create_l7rule(self, l7rule_id):
|
772
|
-
"""Creates an L7 Rule.
|
773
|
-
|
774
|
-
:param l7rule_id: ID of the l7rule to create
|
775
|
-
:returns: None
|
776
|
-
:raises NoResultFound: Unable to find the object
|
777
|
-
"""
|
778
|
-
l7rule = self._l7rule_repo.get(db_apis.get_session(),
|
779
|
-
id=l7rule_id)
|
780
|
-
if not l7rule:
|
781
|
-
LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
|
782
|
-
'60 seconds.', 'l7rule', l7rule_id)
|
783
|
-
raise db_exceptions.NoResultFound
|
784
|
-
|
785
|
-
l7policy = l7rule.l7policy
|
786
|
-
listeners = [l7policy.listener]
|
787
|
-
load_balancer = l7policy.listener.load_balancer
|
788
|
-
|
789
|
-
create_l7rule_tf = self.taskflow_load(
|
790
|
-
self._l7rule_flows.get_create_l7rule_flow(),
|
791
|
-
store={constants.L7RULE: l7rule,
|
792
|
-
constants.L7POLICY: l7policy,
|
793
|
-
constants.LISTENERS: listeners,
|
794
|
-
constants.LOADBALANCER: load_balancer})
|
795
|
-
with tf_logging.DynamicLoggingListener(create_l7rule_tf,
|
796
|
-
log=LOG):
|
797
|
-
create_l7rule_tf.run()
|
798
|
-
|
799
|
-
def delete_l7rule(self, l7rule_id):
|
800
|
-
"""Deletes an L7 rule.
|
801
|
-
|
802
|
-
:param l7rule_id: ID of the l7rule to delete
|
803
|
-
:returns: None
|
804
|
-
:raises L7RuleNotFound: The referenced l7rule was not found
|
805
|
-
"""
|
806
|
-
l7rule = self._l7rule_repo.get(db_apis.get_session(),
|
807
|
-
id=l7rule_id)
|
808
|
-
l7policy = l7rule.l7policy
|
809
|
-
load_balancer = l7policy.listener.load_balancer
|
810
|
-
listeners = [l7policy.listener]
|
811
|
-
|
812
|
-
delete_l7rule_tf = self.taskflow_load(
|
813
|
-
self._l7rule_flows.get_delete_l7rule_flow(),
|
814
|
-
store={constants.L7RULE: l7rule,
|
815
|
-
constants.L7POLICY: l7policy,
|
816
|
-
constants.LISTENERS: listeners,
|
817
|
-
constants.LOADBALANCER: load_balancer})
|
818
|
-
with tf_logging.DynamicLoggingListener(delete_l7rule_tf,
|
819
|
-
log=LOG):
|
820
|
-
delete_l7rule_tf.run()
|
821
|
-
|
822
|
-
def update_l7rule(self, l7rule_id, l7rule_updates):
|
823
|
-
"""Updates an L7 rule.
|
824
|
-
|
825
|
-
:param l7rule_id: ID of the l7rule to update
|
826
|
-
:param l7rule_updates: Dict containing updated l7rule attributes
|
827
|
-
:returns: None
|
828
|
-
:raises L7RuleNotFound: The referenced l7rule was not found
|
829
|
-
"""
|
830
|
-
l7rule = None
|
831
|
-
try:
|
832
|
-
l7rule = self._get_db_obj_until_pending_update(
|
833
|
-
self._l7rule_repo, l7rule_id)
|
834
|
-
except tenacity.RetryError as e:
|
835
|
-
LOG.warning('L7 rule did not go into %s in 60 seconds. '
|
836
|
-
'This either due to an in-progress Octavia upgrade '
|
837
|
-
'or an overloaded and failing database. Assuming '
|
838
|
-
'an upgrade is in progress and continuing.',
|
839
|
-
constants.PENDING_UPDATE)
|
840
|
-
l7rule = e.last_attempt.result()
|
841
|
-
|
842
|
-
l7policy = l7rule.l7policy
|
843
|
-
listeners = [l7policy.listener]
|
844
|
-
load_balancer = l7policy.listener.load_balancer
|
845
|
-
|
846
|
-
update_l7rule_tf = self.taskflow_load(
|
847
|
-
self._l7rule_flows.get_update_l7rule_flow(),
|
848
|
-
store={constants.L7RULE: l7rule,
|
849
|
-
constants.L7POLICY: l7policy,
|
850
|
-
constants.LISTENERS: listeners,
|
851
|
-
constants.LOADBALANCER: load_balancer,
|
852
|
-
constants.UPDATE_DICT: l7rule_updates})
|
853
|
-
with tf_logging.DynamicLoggingListener(update_l7rule_tf,
|
854
|
-
log=LOG):
|
855
|
-
update_l7rule_tf.run()
|
856
|
-
|
857
|
-
    def failover_amphora(self, amphora_id, reraise=False):
        """Perform failover operations for an amphora.

        Note: This expects the load balancer to already be in
        provisioning_status=PENDING_UPDATE state.

        :param amphora_id: ID for amphora to failover
        :param reraise: If enabled reraise any caught exception
        :returns: None
        :raises octavia.common.exceptions.NotFound: The referenced amphora was
                                                    not found
        """
        # Everything runs inside one try so that any failure marks the
        # amphora (and its load balancer, if any) ERROR in the except block.
        amphora = None
        try:
            amphora = self._amphora_repo.get(db_apis.get_session(),
                                             id=amphora_id)
            if amphora is None:
                LOG.error('Amphora failover for amphora %s failed because '
                          'there is no record of this amphora in the '
                          'database. Check that the [house_keeping] '
                          'amphora_expiry_age configuration setting is not '
                          'too short. Skipping failover.', amphora_id)
                raise exceptions.NotFound(resource=constants.AMPHORA,
                                          id=amphora_id)

            if amphora.status == constants.DELETED:
                # A DELETED amphora needs no failover; just drop it from the
                # health table so the health manager stops considering it.
                LOG.warning('Amphora %s is marked DELETED in the database but '
                            'was submitted for failover. Deleting it from the '
                            'amphora health table to exclude it from health '
                            'checks and skipping the failover.', amphora.id)
                self._amphora_health_repo.delete(db_apis.get_session(),
                                                 amphora_id=amphora.id)
                return

            # The amphora may be spare/orphaned and have no load balancer.
            loadbalancer = None
            if amphora.load_balancer_id:
                loadbalancer = self._lb_repo.get(db_apis.get_session(),
                                                 id=amphora.load_balancer_id)
            # Expected amphora count for the LB topology; None when unknown.
            lb_amp_count = None
            if loadbalancer:
                if loadbalancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
                    lb_amp_count = 2
                elif loadbalancer.topology == constants.TOPOLOGY_SINGLE:
                    lb_amp_count = 1

            amp_failover_flow = self._amphora_flows.get_failover_amphora_flow(
                amphora, lb_amp_count)

            # Defaults cover the no-load-balancer case; overridden below
            # when a load balancer record exists.
            az_metadata = {}
            flavor = {}
            lb_id = None
            vip = None
            server_group_id = None
            if loadbalancer:
                lb_id = loadbalancer.id
                # The flavor passed into the flow always carries the LB
                # topology so the replacement amphora is built correctly.
                if loadbalancer.flavor_id:
                    flavor = self._flavor_repo.get_flavor_metadata_dict(
                        db_apis.get_session(), loadbalancer.flavor_id)
                    flavor[constants.LOADBALANCER_TOPOLOGY] = (
                        loadbalancer.topology)
                else:
                    flavor = {constants.LOADBALANCER_TOPOLOGY:
                              loadbalancer.topology}
                if loadbalancer.availability_zone:
                    az_metadata = (
                        self._az_repo.get_availability_zone_metadata_dict(
                            db_apis.get_session(),
                            loadbalancer.availability_zone))
                vip = loadbalancer.vip
                server_group_id = loadbalancer.server_group_id

            stored_params = {constants.AVAILABILITY_ZONE: az_metadata,
                             constants.BUILD_TYPE_PRIORITY:
                             constants.LB_CREATE_FAILOVER_PRIORITY,
                             constants.FLAVOR: flavor,
                             constants.LOADBALANCER: loadbalancer,
                             constants.SERVER_GROUP_ID: server_group_id,
                             constants.LOADBALANCER_ID: lb_id,
                             constants.VIP: vip}

            failover_amphora_tf = self.taskflow_load(amp_failover_flow,
                                                     store=stored_params)

            with tf_logging.DynamicLoggingListener(failover_amphora_tf,
                                                   log=LOG):
                failover_amphora_tf.run()

            LOG.info("Successfully completed the failover for an amphora: %s",
                     {"id": amphora_id,
                      "load_balancer_id": lb_id,
                      "lb_network_ip": amphora.lb_network_ip,
                      "compute_id": amphora.compute_id,
                      "role": amphora.role})

        except Exception as e:
            # Mark the amphora (and its LB) ERROR; the exception is only
            # re-raised when the caller asked for it via reraise=True.
            with excutils.save_and_reraise_exception(reraise=reraise):
                LOG.exception("Amphora %s failover exception: %s",
                              amphora_id, str(e))
                self._amphora_repo.update(db_apis.get_session(),
                                          amphora_id, status=constants.ERROR)
                if amphora and amphora.load_balancer_id:
                    self._lb_repo.update(
                        db_apis.get_session(), amphora.load_balancer_id,
                        provisioning_status=constants.ERROR)
|
961
|
-
|
962
|
-
    @staticmethod
    def _get_amphorae_for_failover(load_balancer):
        """Returns an ordered list of amphora to failover.

        DELETED amphorae are always excluded from the result.

        :param load_balancer: The load balancer being failed over.
        :returns: An ordered list of amphora to failover,
                  first amp to failover is last in the list
        :raises octavia.common.exceptions.InvalidTopology: LB has an unknown
                                                           topology.
        """
        if load_balancer.topology == constants.TOPOLOGY_SINGLE:
            # In SINGLE topology, amp failover order does not matter
            return [a for a in load_balancer.amphorae
                    if a.status != constants.DELETED]

        if load_balancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
            # In Active/Standby we should preference the standby amp
            # for failover first in case the Active is still able to pass
            # traffic.
            # Note: The active amp can switch at any time and in less than a
            # second, so this is "best effort".
            amphora_driver = utils.get_amphora_driver()
            # Connection limits for probing each amphora for the VIP below.
            timeout_dict = {
                constants.CONN_MAX_RETRIES:
                    CONF.haproxy_amphora.failover_connection_max_retries,
                constants.CONN_RETRY_INTERVAL:
                    CONF.haproxy_amphora.failover_connection_retry_interval}
            amps = []
            selected_amp = None
            for amp in load_balancer.amphorae:
                if amp.status == constants.DELETED:
                    continue
                if selected_amp is None:
                    try:
                        # Probe: an amphora holding the VIP address is the
                        # likely ACTIVE; one without it is a STANDBY.
                        if amphora_driver.get_interface_from_ip(
                                amp, load_balancer.vip.ip_address,
                                timeout_dict):
                            # This is a potential ACTIVE, add it to the list
                            amps.append(amp)
                        else:
                            # This one doesn't have the VIP IP, so start
                            # failovers here.
                            selected_amp = amp
                            LOG.debug("Selected amphora %s as the initial "
                                      "failover amphora.", amp.id)
                    except Exception:
                        # This amphora is broken, so start failovers here.
                        selected_amp = amp
                else:
                    # We have already found a STANDBY, so add the rest to the
                    # list without querying them.
                    amps.append(amp)
            # Put the selected amphora at the end of the list so it is
            # first to failover.
            if selected_amp:
                amps.append(selected_amp)
            return amps

        # Neither SINGLE nor ACTIVE_STANDBY: refuse to guess an order.
        LOG.error('Unknown load balancer topology found: %s, aborting '
                  'failover.', load_balancer.topology)
        raise exceptions.InvalidTopology(topology=load_balancer.topology)
|
1023
|
-
|
1024
|
-
    def failover_loadbalancer(self, load_balancer_id):
        """Perform failover operations for a load balancer.

        Note: This expects the load balancer to already be in
        provisioning_status=PENDING_UPDATE state.

        :param load_balancer_id: ID for load balancer to failover
        :returns: None
        :raises octavia.common.exceptions.NotFound: The load balancer was not
                                                    found.
        """
        # Everything runs inside one try so that any failure marks the load
        # balancer ERROR in the except block below.
        try:
            lb = self._lb_repo.get(db_apis.get_session(),
                                   id=load_balancer_id)
            if lb is None:
                raise exceptions.NotFound(resource=constants.LOADBALANCER,
                                          id=load_balancer_id)

            # Get the ordered list of amphorae to failover for this LB.
            amps = self._get_amphorae_for_failover(lb)

            # Unexpected amphora counts are only warnings: the failover flow
            # repairs the topology as part of the failover.
            if lb.topology == constants.TOPOLOGY_SINGLE:
                if len(amps) != 1:
                    LOG.warning('%d amphorae found on load balancer %s where '
                                'one should exist. Repairing.', len(amps),
                                load_balancer_id)
            elif lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY:

                if len(amps) != 2:
                    LOG.warning('%d amphorae found on load balancer %s where '
                                'two should exist. Repairing.', len(amps),
                                load_balancer_id)
            else:
                LOG.error('Unknown load balancer topology found: %s, aborting '
                          'failover!', lb.topology)
                raise exceptions.InvalidTopology(topology=lb.topology)

            # Build our failover flow.
            lb_failover_flow = self._lb_flows.get_failover_LB_flow(amps, lb)

            # We must provide a topology in the flavor definition
            # here for the amphora to be created with the correct
            # configuration.
            if lb.flavor_id:
                flavor = self._flavor_repo.get_flavor_metadata_dict(
                    db_apis.get_session(), lb.flavor_id)
                flavor[constants.LOADBALANCER_TOPOLOGY] = lb.topology
            else:
                flavor = {constants.LOADBALANCER_TOPOLOGY: lb.topology}

            stored_params = {constants.LOADBALANCER: lb,
                             constants.BUILD_TYPE_PRIORITY:
                             constants.LB_CREATE_FAILOVER_PRIORITY,
                             constants.SERVER_GROUP_ID: lb.server_group_id,
                             constants.LOADBALANCER_ID: lb.id,
                             constants.FLAVOR: flavor}

            if lb.availability_zone:
                stored_params[constants.AVAILABILITY_ZONE] = (
                    self._az_repo.get_availability_zone_metadata_dict(
                        db_apis.get_session(), lb.availability_zone))
            else:
                stored_params[constants.AVAILABILITY_ZONE] = {}

            failover_lb_tf = self.taskflow_load(lb_failover_flow,
                                                store=stored_params)

            with tf_logging.DynamicLoggingListener(failover_lb_tf, log=LOG):
                failover_lb_tf.run()
            LOG.info('Failover of load balancer %s completed successfully.',
                     lb.id)

        except Exception as e:
            # reraise=False: the failure is recorded on the LB record and the
            # exception is deliberately swallowed after logging.
            with excutils.save_and_reraise_exception(reraise=False):
                LOG.exception("LB %(lbid)s failover exception: %(exc)s",
                              {'lbid': load_balancer_id, 'exc': str(e)})
                self._lb_repo.update(
                    db_apis.get_session(), load_balancer_id,
                    provisioning_status=constants.ERROR)
|
1103
|
-
|
1104
|
-
def amphora_cert_rotation(self, amphora_id):
|
1105
|
-
"""Perform cert rotation for an amphora.
|
1106
|
-
|
1107
|
-
:param amphora_id: ID for amphora to rotate
|
1108
|
-
:returns: None
|
1109
|
-
:raises AmphoraNotFound: The referenced amphora was not found
|
1110
|
-
"""
|
1111
|
-
|
1112
|
-
amp = self._amphora_repo.get(db_apis.get_session(),
|
1113
|
-
id=amphora_id)
|
1114
|
-
LOG.info("Start amphora cert rotation, amphora's id is: %s",
|
1115
|
-
amphora_id)
|
1116
|
-
|
1117
|
-
certrotation_amphora_tf = self.taskflow_load(
|
1118
|
-
self._amphora_flows.cert_rotate_amphora_flow(),
|
1119
|
-
store={constants.AMPHORA: amp,
|
1120
|
-
constants.AMPHORA_ID: amp.id})
|
1121
|
-
|
1122
|
-
with tf_logging.DynamicLoggingListener(certrotation_amphora_tf,
|
1123
|
-
log=LOG):
|
1124
|
-
certrotation_amphora_tf.run()
|
1125
|
-
LOG.info("Finished amphora cert rotation, amphora's id was: %s",
|
1126
|
-
amphora_id)
|
1127
|
-
|
1128
|
-
def update_amphora_agent_config(self, amphora_id):
|
1129
|
-
"""Update the amphora agent configuration.
|
1130
|
-
|
1131
|
-
Note: This will update the amphora agent configuration file and
|
1132
|
-
update the running configuration for mutatable configuration
|
1133
|
-
items.
|
1134
|
-
|
1135
|
-
:param amphora_id: ID of the amphora to update.
|
1136
|
-
:returns: None
|
1137
|
-
"""
|
1138
|
-
LOG.info("Start amphora agent configuration update, amphora's id "
|
1139
|
-
"is: %s", amphora_id)
|
1140
|
-
amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id)
|
1141
|
-
lb = self._amphora_repo.get_lb_for_amphora(db_apis.get_session(),
|
1142
|
-
amphora_id)
|
1143
|
-
flavor = {}
|
1144
|
-
if lb.flavor_id:
|
1145
|
-
flavor = self._flavor_repo.get_flavor_metadata_dict(
|
1146
|
-
db_apis.get_session(), lb.flavor_id)
|
1147
|
-
|
1148
|
-
update_amphora_tf = self.taskflow_load(
|
1149
|
-
self._amphora_flows.update_amphora_config_flow(),
|
1150
|
-
store={constants.AMPHORA: amp,
|
1151
|
-
constants.FLAVOR: flavor})
|
1152
|
-
|
1153
|
-
with tf_logging.DynamicLoggingListener(update_amphora_tf,
|
1154
|
-
log=LOG):
|
1155
|
-
update_amphora_tf.run()
|
1156
|
-
LOG.info("Finished amphora agent configuration update, amphora's id "
|
1157
|
-
"was: %s", amphora_id)
|