octavia 12.0.0.0rc2__py3-none-any.whl → 13.0.0.0rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- octavia/amphorae/backends/agent/api_server/osutils.py +1 -0
- octavia/amphorae/backends/agent/api_server/plug.py +21 -7
- octavia/amphorae/backends/agent/api_server/templates/amphora-netns.systemd.j2 +2 -2
- octavia/amphorae/backends/agent/api_server/util.py +21 -0
- octavia/amphorae/backends/health_daemon/health_daemon.py +9 -3
- octavia/amphorae/backends/health_daemon/health_sender.py +2 -0
- octavia/amphorae/backends/utils/interface.py +14 -6
- octavia/amphorae/backends/utils/interface_file.py +6 -3
- octavia/amphorae/backends/utils/keepalivedlvs_query.py +8 -9
- octavia/amphorae/drivers/driver_base.py +1 -2
- octavia/amphorae/drivers/haproxy/rest_api_driver.py +11 -25
- octavia/amphorae/drivers/health/heartbeat_udp.py +34 -24
- octavia/amphorae/drivers/keepalived/jinja/jinja_cfg.py +3 -12
- octavia/amphorae/drivers/noop_driver/driver.py +3 -5
- octavia/api/common/pagination.py +4 -4
- octavia/api/drivers/amphora_driver/v2/driver.py +11 -5
- octavia/api/drivers/driver_agent/driver_get.py +22 -14
- octavia/api/drivers/driver_agent/driver_updater.py +8 -4
- octavia/api/drivers/utils.py +4 -2
- octavia/api/healthcheck/healthcheck_plugins.py +4 -2
- octavia/api/root_controller.py +4 -1
- octavia/api/v2/controllers/amphora.py +35 -38
- octavia/api/v2/controllers/availability_zone_profiles.py +43 -33
- octavia/api/v2/controllers/availability_zones.py +22 -18
- octavia/api/v2/controllers/flavor_profiles.py +37 -28
- octavia/api/v2/controllers/flavors.py +19 -15
- octavia/api/v2/controllers/health_monitor.py +44 -33
- octavia/api/v2/controllers/l7policy.py +52 -40
- octavia/api/v2/controllers/l7rule.py +68 -55
- octavia/api/v2/controllers/listener.py +88 -61
- octavia/api/v2/controllers/load_balancer.py +52 -34
- octavia/api/v2/controllers/member.py +63 -52
- octavia/api/v2/controllers/pool.py +55 -42
- octavia/api/v2/controllers/quotas.py +5 -3
- octavia/api/v2/types/listener.py +15 -0
- octavia/cmd/octavia_worker.py +0 -3
- octavia/cmd/status.py +1 -4
- octavia/common/clients.py +25 -45
- octavia/common/config.py +64 -22
- octavia/common/constants.py +3 -2
- octavia/common/data_models.py +7 -1
- octavia/common/jinja/haproxy/combined_listeners/jinja_cfg.py +12 -1
- octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 +5 -2
- octavia/common/jinja/lvs/jinja_cfg.py +4 -2
- octavia/common/keystone.py +58 -5
- octavia/common/validate.py +35 -0
- octavia/compute/drivers/noop_driver/driver.py +6 -0
- octavia/controller/healthmanager/health_manager.py +3 -6
- octavia/controller/housekeeping/house_keeping.py +36 -37
- octavia/controller/worker/amphora_rate_limit.py +5 -4
- octavia/controller/worker/task_utils.py +57 -41
- octavia/controller/worker/v2/controller_worker.py +160 -103
- octavia/controller/worker/v2/flows/listener_flows.py +3 -0
- octavia/controller/worker/v2/flows/load_balancer_flows.py +9 -14
- octavia/controller/worker/v2/tasks/amphora_driver_tasks.py +152 -91
- octavia/controller/worker/v2/tasks/compute_tasks.py +4 -2
- octavia/controller/worker/v2/tasks/database_tasks.py +542 -400
- octavia/controller/worker/v2/tasks/network_tasks.py +119 -79
- octavia/db/api.py +26 -23
- octavia/db/base_models.py +2 -2
- octavia/db/healthcheck.py +2 -1
- octavia/db/migration/alembic_migrations/versions/632152d2d32e_add_http_strict_transport_security_.py +42 -0
- octavia/db/models.py +12 -2
- octavia/db/prepare.py +2 -0
- octavia/db/repositories.py +462 -482
- octavia/hacking/checks.py +1 -1
- octavia/network/base.py +0 -14
- octavia/network/drivers/neutron/allowed_address_pairs.py +92 -135
- octavia/network/drivers/neutron/base.py +65 -77
- octavia/network/drivers/neutron/utils.py +69 -85
- octavia/network/drivers/noop_driver/driver.py +0 -7
- octavia/statistics/drivers/update_db.py +10 -10
- octavia/tests/common/constants.py +91 -84
- octavia/tests/common/sample_data_models.py +13 -1
- octavia/tests/fixtures.py +32 -0
- octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py +9 -10
- octavia/tests/functional/api/drivers/driver_agent/test_driver_agent.py +260 -15
- octavia/tests/functional/api/test_root_controller.py +3 -28
- octavia/tests/functional/api/v2/base.py +5 -3
- octavia/tests/functional/api/v2/test_amphora.py +18 -5
- octavia/tests/functional/api/v2/test_availability_zone_profiles.py +1 -0
- octavia/tests/functional/api/v2/test_listener.py +51 -19
- octavia/tests/functional/api/v2/test_load_balancer.py +10 -1
- octavia/tests/functional/db/base.py +31 -16
- octavia/tests/functional/db/test_models.py +27 -28
- octavia/tests/functional/db/test_repositories.py +407 -50
- octavia/tests/unit/amphorae/backends/agent/api_server/test_amphora_info.py +2 -0
- octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py +1 -1
- octavia/tests/unit/amphorae/backends/agent/api_server/test_plug.py +54 -6
- octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py +35 -0
- octavia/tests/unit/amphorae/backends/health_daemon/test_health_daemon.py +8 -0
- octavia/tests/unit/amphorae/backends/health_daemon/test_health_sender.py +18 -0
- octavia/tests/unit/amphorae/backends/utils/test_interface.py +81 -0
- octavia/tests/unit/amphorae/backends/utils/test_interface_file.py +2 -0
- octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py +129 -5
- octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py +42 -20
- octavia/tests/unit/amphorae/drivers/health/test_heartbeat_udp.py +18 -20
- octavia/tests/unit/amphorae/drivers/keepalived/jinja/test_jinja_cfg.py +4 -4
- octavia/tests/unit/amphorae/drivers/noop_driver/test_driver.py +4 -1
- octavia/tests/unit/api/drivers/driver_agent/test_driver_get.py +3 -3
- octavia/tests/unit/api/drivers/driver_agent/test_driver_updater.py +11 -13
- octavia/tests/unit/base.py +6 -0
- octavia/tests/unit/cmd/test_interface.py +2 -2
- octavia/tests/unit/cmd/test_status.py +2 -2
- octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py +152 -1
- octavia/tests/unit/common/sample_configs/sample_configs_combined.py +10 -3
- octavia/tests/unit/common/test_clients.py +0 -39
- octavia/tests/unit/common/test_keystone.py +54 -0
- octavia/tests/unit/common/test_validate.py +67 -0
- octavia/tests/unit/controller/healthmanager/test_health_manager.py +8 -22
- octavia/tests/unit/controller/housekeeping/test_house_keeping.py +3 -64
- octavia/tests/unit/controller/worker/test_amphora_rate_limit.py +1 -1
- octavia/tests/unit/controller/worker/test_task_utils.py +44 -24
- octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py +0 -1
- octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py +49 -26
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py +399 -196
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py +37 -64
- octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py +3 -14
- octavia/tests/unit/controller/worker/v2/test_controller_worker.py +2 -2
- octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py +456 -561
- octavia/tests/unit/network/drivers/neutron/test_base.py +181 -194
- octavia/tests/unit/network/drivers/neutron/test_utils.py +14 -30
- octavia/tests/unit/statistics/drivers/test_update_db.py +7 -5
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/README.rst +1 -1
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/AUTHORS +4 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/METADATA +4 -4
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/RECORD +141 -189
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/entry_points.txt +1 -2
- octavia-13.0.0.0rc1.dist-info/pbr.json +1 -0
- octavia/api/drivers/amphora_driver/v1/__init__.py +0 -11
- octavia/api/drivers/amphora_driver/v1/driver.py +0 -547
- octavia/controller/queue/v1/__init__.py +0 -11
- octavia/controller/queue/v1/consumer.py +0 -64
- octavia/controller/queue/v1/endpoints.py +0 -160
- octavia/controller/worker/v1/__init__.py +0 -11
- octavia/controller/worker/v1/controller_worker.py +0 -1157
- octavia/controller/worker/v1/flows/__init__.py +0 -11
- octavia/controller/worker/v1/flows/amphora_flows.py +0 -610
- octavia/controller/worker/v1/flows/health_monitor_flows.py +0 -105
- octavia/controller/worker/v1/flows/l7policy_flows.py +0 -94
- octavia/controller/worker/v1/flows/l7rule_flows.py +0 -100
- octavia/controller/worker/v1/flows/listener_flows.py +0 -128
- octavia/controller/worker/v1/flows/load_balancer_flows.py +0 -692
- octavia/controller/worker/v1/flows/member_flows.py +0 -230
- octavia/controller/worker/v1/flows/pool_flows.py +0 -127
- octavia/controller/worker/v1/tasks/__init__.py +0 -11
- octavia/controller/worker/v1/tasks/amphora_driver_tasks.py +0 -453
- octavia/controller/worker/v1/tasks/cert_task.py +0 -51
- octavia/controller/worker/v1/tasks/compute_tasks.py +0 -335
- octavia/controller/worker/v1/tasks/database_tasks.py +0 -2756
- octavia/controller/worker/v1/tasks/lifecycle_tasks.py +0 -173
- octavia/controller/worker/v1/tasks/model_tasks.py +0 -41
- octavia/controller/worker/v1/tasks/network_tasks.py +0 -970
- octavia/controller/worker/v1/tasks/retry_tasks.py +0 -74
- octavia/tests/unit/api/drivers/amphora_driver/v1/__init__.py +0 -11
- octavia/tests/unit/api/drivers/amphora_driver/v1/test_driver.py +0 -824
- octavia/tests/unit/controller/queue/v1/__init__.py +0 -11
- octavia/tests/unit/controller/queue/v1/test_consumer.py +0 -61
- octavia/tests/unit/controller/queue/v1/test_endpoints.py +0 -189
- octavia/tests/unit/controller/worker/v1/__init__.py +0 -11
- octavia/tests/unit/controller/worker/v1/flows/__init__.py +0 -11
- octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py +0 -474
- octavia/tests/unit/controller/worker/v1/flows/test_health_monitor_flows.py +0 -72
- octavia/tests/unit/controller/worker/v1/flows/test_l7policy_flows.py +0 -67
- octavia/tests/unit/controller/worker/v1/flows/test_l7rule_flows.py +0 -67
- octavia/tests/unit/controller/worker/v1/flows/test_listener_flows.py +0 -91
- octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py +0 -431
- octavia/tests/unit/controller/worker/v1/flows/test_member_flows.py +0 -106
- octavia/tests/unit/controller/worker/v1/flows/test_pool_flows.py +0 -77
- octavia/tests/unit/controller/worker/v1/tasks/__init__.py +0 -11
- octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py +0 -792
- octavia/tests/unit/controller/worker/v1/tasks/test_cert_task.py +0 -46
- octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py +0 -634
- octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py +0 -2615
- octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks_quota.py +0 -415
- octavia/tests/unit/controller/worker/v1/tasks/test_lifecycle_tasks.py +0 -401
- octavia/tests/unit/controller/worker/v1/tasks/test_model_tasks.py +0 -44
- octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py +0 -1788
- octavia/tests/unit/controller/worker/v1/tasks/test_retry_tasks.py +0 -47
- octavia/tests/unit/controller/worker/v1/test_controller_worker.py +0 -2096
- octavia-12.0.0.0rc2.dist-info/pbr.json +0 -1
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/LICENSE +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/README.rst +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/diskimage-create.sh +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/image-tests.sh +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/requirements.txt +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/test-requirements.txt +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/tox.ini +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/version.txt +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/scripts/octavia-wsgi +0 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/LICENSE +0 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/WHEEL +0 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/top_level.txt +0 -0
@@ -1,11 +0,0 @@
|
|
1
|
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
2
|
-
# not use this file except in compliance with the License. You may obtain
|
3
|
-
# a copy of the License at
|
4
|
-
#
|
5
|
-
# http://www.apache.org/licenses/LICENSE-2.0
|
6
|
-
#
|
7
|
-
# Unless required by applicable law or agreed to in writing, software
|
8
|
-
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
9
|
-
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
10
|
-
# License for the specific language governing permissions and limitations
|
11
|
-
# under the License.
|
@@ -1,610 +0,0 @@
|
|
1
|
-
# Copyright 2015 Hewlett-Packard Development Company, L.P.
|
2
|
-
# Copyright 2020 Red Hat, Inc. All rights reserved.
|
3
|
-
#
|
4
|
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
5
|
-
# not use this file except in compliance with the License. You may obtain
|
6
|
-
# a copy of the License at
|
7
|
-
#
|
8
|
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9
|
-
#
|
10
|
-
# Unless required by applicable law or agreed to in writing, software
|
11
|
-
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
12
|
-
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
13
|
-
# License for the specific language governing permissions and limitations
|
14
|
-
# under the License.
|
15
|
-
#
|
16
|
-
|
17
|
-
from oslo_config import cfg
|
18
|
-
from oslo_log import log as logging
|
19
|
-
from taskflow.patterns import linear_flow
|
20
|
-
from taskflow.patterns import unordered_flow
|
21
|
-
|
22
|
-
from octavia.common import constants
|
23
|
-
from octavia.common import utils
|
24
|
-
from octavia.controller.worker.v1.tasks import amphora_driver_tasks
|
25
|
-
from octavia.controller.worker.v1.tasks import cert_task
|
26
|
-
from octavia.controller.worker.v1.tasks import compute_tasks
|
27
|
-
from octavia.controller.worker.v1.tasks import database_tasks
|
28
|
-
from octavia.controller.worker.v1.tasks import lifecycle_tasks
|
29
|
-
from octavia.controller.worker.v1.tasks import network_tasks
|
30
|
-
from octavia.controller.worker.v1.tasks import retry_tasks
|
31
|
-
|
32
|
-
CONF = cfg.CONF
|
33
|
-
LOG = logging.getLogger(__name__)
|
34
|
-
|
35
|
-
|
36
|
-
class AmphoraFlows(object):
|
37
|
-
|
38
|
-
def get_create_amphora_flow(self):
|
39
|
-
"""Creates a flow to create an amphora.
|
40
|
-
|
41
|
-
:returns: The flow for creating the amphora
|
42
|
-
"""
|
43
|
-
create_amphora_flow = linear_flow.Flow(constants.CREATE_AMPHORA_FLOW)
|
44
|
-
create_amphora_flow.add(database_tasks.CreateAmphoraInDB(
|
45
|
-
provides=constants.AMPHORA_ID))
|
46
|
-
create_amphora_flow.add(lifecycle_tasks.AmphoraIDToErrorOnRevertTask(
|
47
|
-
requires=constants.AMPHORA_ID))
|
48
|
-
create_amphora_flow.add(cert_task.GenerateServerPEMTask(
|
49
|
-
provides=constants.SERVER_PEM))
|
50
|
-
create_amphora_flow.add(
|
51
|
-
database_tasks.UpdateAmphoraDBCertExpiration(
|
52
|
-
requires=(constants.AMPHORA_ID, constants.SERVER_PEM)))
|
53
|
-
create_amphora_flow.add(compute_tasks.CertComputeCreate(
|
54
|
-
requires=(constants.AMPHORA_ID, constants.SERVER_PEM,
|
55
|
-
constants.SERVER_GROUP_ID, constants.BUILD_TYPE_PRIORITY,
|
56
|
-
constants.FLAVOR, constants.AVAILABILITY_ZONE),
|
57
|
-
provides=constants.COMPUTE_ID))
|
58
|
-
create_amphora_flow.add(database_tasks.MarkAmphoraBootingInDB(
|
59
|
-
requires=(constants.AMPHORA_ID, constants.COMPUTE_ID)))
|
60
|
-
create_amphora_flow.add(compute_tasks.ComputeActiveWait(
|
61
|
-
requires=(constants.COMPUTE_ID, constants.AMPHORA_ID),
|
62
|
-
provides=constants.COMPUTE_OBJ))
|
63
|
-
create_amphora_flow.add(database_tasks.UpdateAmphoraInfo(
|
64
|
-
requires=(constants.AMPHORA_ID, constants.COMPUTE_OBJ),
|
65
|
-
provides=constants.AMPHORA))
|
66
|
-
create_amphora_flow.add(
|
67
|
-
amphora_driver_tasks.AmphoraComputeConnectivityWait(
|
68
|
-
requires=constants.AMPHORA))
|
69
|
-
create_amphora_flow.add(database_tasks.ReloadAmphora(
|
70
|
-
requires=constants.AMPHORA_ID,
|
71
|
-
provides=constants.AMPHORA))
|
72
|
-
create_amphora_flow.add(amphora_driver_tasks.AmphoraFinalize(
|
73
|
-
requires=constants.AMPHORA))
|
74
|
-
create_amphora_flow.add(database_tasks.MarkAmphoraReadyInDB(
|
75
|
-
requires=constants.AMPHORA))
|
76
|
-
|
77
|
-
return create_amphora_flow
|
78
|
-
|
79
|
-
def _get_post_map_lb_subflow(self, prefix, role):
|
80
|
-
"""Set amphora type after mapped to lb."""
|
81
|
-
|
82
|
-
sf_name = prefix + '-' + constants.POST_MAP_AMP_TO_LB_SUBFLOW
|
83
|
-
post_map_amp_to_lb = linear_flow.Flow(
|
84
|
-
sf_name)
|
85
|
-
|
86
|
-
post_map_amp_to_lb.add(database_tasks.ReloadAmphora(
|
87
|
-
name=sf_name + '-' + constants.RELOAD_AMPHORA,
|
88
|
-
requires=constants.AMPHORA_ID,
|
89
|
-
provides=constants.AMPHORA))
|
90
|
-
|
91
|
-
post_map_amp_to_lb.add(amphora_driver_tasks.AmphoraConfigUpdate(
|
92
|
-
name=sf_name + '-' + constants.AMPHORA_CONFIG_UPDATE_TASK,
|
93
|
-
requires=(constants.AMPHORA, constants.FLAVOR)))
|
94
|
-
|
95
|
-
if role == constants.ROLE_MASTER:
|
96
|
-
post_map_amp_to_lb.add(database_tasks.MarkAmphoraMasterInDB(
|
97
|
-
name=sf_name + '-' + constants.MARK_AMP_MASTER_INDB,
|
98
|
-
requires=constants.AMPHORA))
|
99
|
-
elif role == constants.ROLE_BACKUP:
|
100
|
-
post_map_amp_to_lb.add(database_tasks.MarkAmphoraBackupInDB(
|
101
|
-
name=sf_name + '-' + constants.MARK_AMP_BACKUP_INDB,
|
102
|
-
requires=constants.AMPHORA))
|
103
|
-
elif role == constants.ROLE_STANDALONE:
|
104
|
-
post_map_amp_to_lb.add(database_tasks.MarkAmphoraStandAloneInDB(
|
105
|
-
name=sf_name + '-' + constants.MARK_AMP_STANDALONE_INDB,
|
106
|
-
requires=constants.AMPHORA))
|
107
|
-
|
108
|
-
return post_map_amp_to_lb
|
109
|
-
|
110
|
-
def _get_create_amp_for_lb_subflow(self, prefix, role):
|
111
|
-
"""Create a new amphora for lb."""
|
112
|
-
|
113
|
-
sf_name = prefix + '-' + constants.CREATE_AMP_FOR_LB_SUBFLOW
|
114
|
-
create_amp_for_lb_subflow = linear_flow.Flow(sf_name)
|
115
|
-
create_amp_for_lb_subflow.add(database_tasks.CreateAmphoraInDB(
|
116
|
-
name=sf_name + '-' + constants.CREATE_AMPHORA_INDB,
|
117
|
-
requires=constants.LOADBALANCER_ID,
|
118
|
-
provides=constants.AMPHORA_ID))
|
119
|
-
|
120
|
-
create_amp_for_lb_subflow.add(cert_task.GenerateServerPEMTask(
|
121
|
-
name=sf_name + '-' + constants.GENERATE_SERVER_PEM,
|
122
|
-
provides=constants.SERVER_PEM))
|
123
|
-
|
124
|
-
create_amp_for_lb_subflow.add(
|
125
|
-
database_tasks.UpdateAmphoraDBCertExpiration(
|
126
|
-
name=sf_name + '-' + constants.UPDATE_CERT_EXPIRATION,
|
127
|
-
requires=(constants.AMPHORA_ID, constants.SERVER_PEM)))
|
128
|
-
|
129
|
-
create_amp_for_lb_subflow.add(compute_tasks.CertComputeCreate(
|
130
|
-
name=sf_name + '-' + constants.CERT_COMPUTE_CREATE,
|
131
|
-
requires=(constants.AMPHORA_ID, constants.SERVER_PEM,
|
132
|
-
constants.BUILD_TYPE_PRIORITY,
|
133
|
-
constants.SERVER_GROUP_ID,
|
134
|
-
constants.FLAVOR, constants.AVAILABILITY_ZONE),
|
135
|
-
provides=constants.COMPUTE_ID))
|
136
|
-
create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraComputeId(
|
137
|
-
name=sf_name + '-' + constants.UPDATE_AMPHORA_COMPUTEID,
|
138
|
-
requires=(constants.AMPHORA_ID, constants.COMPUTE_ID)))
|
139
|
-
create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraBootingInDB(
|
140
|
-
name=sf_name + '-' + constants.MARK_AMPHORA_BOOTING_INDB,
|
141
|
-
requires=(constants.AMPHORA_ID, constants.COMPUTE_ID)))
|
142
|
-
create_amp_for_lb_subflow.add(compute_tasks.ComputeActiveWait(
|
143
|
-
name=sf_name + '-' + constants.COMPUTE_WAIT,
|
144
|
-
requires=(constants.COMPUTE_ID, constants.AMPHORA_ID,
|
145
|
-
constants.AVAILABILITY_ZONE),
|
146
|
-
provides=constants.COMPUTE_OBJ))
|
147
|
-
create_amp_for_lb_subflow.add(database_tasks.UpdateAmphoraInfo(
|
148
|
-
name=sf_name + '-' + constants.UPDATE_AMPHORA_INFO,
|
149
|
-
requires=(constants.AMPHORA_ID, constants.COMPUTE_OBJ),
|
150
|
-
provides=constants.AMPHORA))
|
151
|
-
create_amp_for_lb_subflow.add(
|
152
|
-
amphora_driver_tasks.AmphoraComputeConnectivityWait(
|
153
|
-
name=sf_name + '-' + constants.AMP_COMPUTE_CONNECTIVITY_WAIT,
|
154
|
-
requires=constants.AMPHORA))
|
155
|
-
create_amp_for_lb_subflow.add(amphora_driver_tasks.AmphoraFinalize(
|
156
|
-
name=sf_name + '-' + constants.AMPHORA_FINALIZE,
|
157
|
-
requires=constants.AMPHORA))
|
158
|
-
create_amp_for_lb_subflow.add(
|
159
|
-
database_tasks.MarkAmphoraAllocatedInDB(
|
160
|
-
name=sf_name + '-' + constants.MARK_AMPHORA_ALLOCATED_INDB,
|
161
|
-
requires=(constants.AMPHORA, constants.LOADBALANCER_ID)))
|
162
|
-
create_amp_for_lb_subflow.add(database_tasks.ReloadAmphora(
|
163
|
-
name=sf_name + '-' + constants.RELOAD_AMPHORA,
|
164
|
-
requires=constants.AMPHORA_ID,
|
165
|
-
provides=constants.AMPHORA))
|
166
|
-
|
167
|
-
if role == constants.ROLE_MASTER:
|
168
|
-
create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraMasterInDB(
|
169
|
-
name=sf_name + '-' + constants.MARK_AMP_MASTER_INDB,
|
170
|
-
requires=constants.AMPHORA))
|
171
|
-
elif role == constants.ROLE_BACKUP:
|
172
|
-
create_amp_for_lb_subflow.add(database_tasks.MarkAmphoraBackupInDB(
|
173
|
-
name=sf_name + '-' + constants.MARK_AMP_BACKUP_INDB,
|
174
|
-
requires=constants.AMPHORA))
|
175
|
-
elif role == constants.ROLE_STANDALONE:
|
176
|
-
create_amp_for_lb_subflow.add(
|
177
|
-
database_tasks.MarkAmphoraStandAloneInDB(
|
178
|
-
name=sf_name + '-' + constants.MARK_AMP_STANDALONE_INDB,
|
179
|
-
requires=constants.AMPHORA))
|
180
|
-
|
181
|
-
return create_amp_for_lb_subflow
|
182
|
-
|
183
|
-
def get_amphora_for_lb_subflow(
|
184
|
-
self, prefix, role=constants.ROLE_STANDALONE):
|
185
|
-
return self._get_create_amp_for_lb_subflow(prefix, role)
|
186
|
-
|
187
|
-
def get_delete_amphora_flow(
|
188
|
-
self, amphora,
|
189
|
-
retry_attempts=CONF.controller_worker.amphora_delete_retries,
|
190
|
-
retry_interval=(
|
191
|
-
CONF.controller_worker.amphora_delete_retry_interval)):
|
192
|
-
"""Creates a subflow to delete an amphora and it's port.
|
193
|
-
|
194
|
-
This flow is idempotent and safe to retry.
|
195
|
-
|
196
|
-
:param amphora: An amphora object.
|
197
|
-
:param retry_attempts: The number of times the flow is retried.
|
198
|
-
:param retry_interval: The time to wait, in seconds, between retries.
|
199
|
-
:returns: The subflow for deleting the amphora.
|
200
|
-
:raises AmphoraNotFound: The referenced Amphora was not found.
|
201
|
-
"""
|
202
|
-
|
203
|
-
delete_amphora_flow = linear_flow.Flow(
|
204
|
-
name=constants.DELETE_AMPHORA_FLOW + '-' + amphora.id,
|
205
|
-
retry=retry_tasks.SleepingRetryTimesController(
|
206
|
-
name='retry-' + constants.DELETE_AMPHORA_FLOW + '-' +
|
207
|
-
amphora.id,
|
208
|
-
attempts=retry_attempts, interval=retry_interval))
|
209
|
-
delete_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
|
210
|
-
name=constants.AMPHORA_TO_ERROR_ON_REVERT + '-' + amphora.id,
|
211
|
-
inject={constants.AMPHORA: amphora}))
|
212
|
-
delete_amphora_flow.add(
|
213
|
-
database_tasks.MarkAmphoraPendingDeleteInDB(
|
214
|
-
name=constants.MARK_AMPHORA_PENDING_DELETE + '-' + amphora.id,
|
215
|
-
inject={constants.AMPHORA: amphora}))
|
216
|
-
delete_amphora_flow.add(database_tasks.MarkAmphoraHealthBusy(
|
217
|
-
name=constants.MARK_AMPHORA_HEALTH_BUSY + '-' + amphora.id,
|
218
|
-
inject={constants.AMPHORA: amphora}))
|
219
|
-
delete_amphora_flow.add(compute_tasks.ComputeDelete(
|
220
|
-
name=constants.DELETE_AMPHORA + '-' + amphora.id,
|
221
|
-
inject={constants.AMPHORA: amphora,
|
222
|
-
constants.PASSIVE_FAILURE: True}))
|
223
|
-
delete_amphora_flow.add(database_tasks.DisableAmphoraHealthMonitoring(
|
224
|
-
name=constants.DISABLE_AMP_HEALTH_MONITORING + '-' + amphora.id,
|
225
|
-
inject={constants.AMPHORA: amphora}))
|
226
|
-
delete_amphora_flow.add(database_tasks.MarkAmphoraDeletedInDB(
|
227
|
-
name=constants.MARK_AMPHORA_DELETED + '-' + amphora.id,
|
228
|
-
inject={constants.AMPHORA: amphora}))
|
229
|
-
if amphora.vrrp_port_id:
|
230
|
-
delete_amphora_flow.add(network_tasks.DeletePort(
|
231
|
-
name=(constants.DELETE_PORT + '-' + str(amphora.id) + '-' +
|
232
|
-
str(amphora.vrrp_port_id)),
|
233
|
-
inject={constants.PORT_ID: amphora.vrrp_port_id,
|
234
|
-
constants.PASSIVE_FAILURE: True}))
|
235
|
-
# TODO(johnsom) What about cleaning up any member ports?
|
236
|
-
# maybe we should get the list of attached ports prior to delete
|
237
|
-
# and call delete on them here. Fix this as part of
|
238
|
-
# https://storyboard.openstack.org/#!/story/2007077
|
239
|
-
|
240
|
-
return delete_amphora_flow
|
241
|
-
|
242
|
-
def get_vrrp_subflow(self, prefix, timeout_dict=None,
|
243
|
-
create_vrrp_group=True):
|
244
|
-
sf_name = prefix + '-' + constants.GET_VRRP_SUBFLOW
|
245
|
-
vrrp_subflow = linear_flow.Flow(sf_name)
|
246
|
-
|
247
|
-
# Optimization for failover flow. No reason to call this
|
248
|
-
# when configuring the secondary amphora.
|
249
|
-
if create_vrrp_group:
|
250
|
-
vrrp_subflow.add(database_tasks.CreateVRRPGroupForLB(
|
251
|
-
name=sf_name + '-' + constants.CREATE_VRRP_GROUP_FOR_LB,
|
252
|
-
requires=constants.LOADBALANCER_ID))
|
253
|
-
|
254
|
-
vrrp_subflow.add(network_tasks.GetAmphoraeNetworkConfigs(
|
255
|
-
name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG,
|
256
|
-
requires=constants.LOADBALANCER_ID,
|
257
|
-
provides=constants.AMPHORAE_NETWORK_CONFIG))
|
258
|
-
|
259
|
-
# VRRP update needs to be run on all amphora to update
|
260
|
-
# their peer configurations. So parallelize this with an
|
261
|
-
# unordered subflow.
|
262
|
-
update_amps_subflow = unordered_flow.Flow('VRRP-update-subflow')
|
263
|
-
|
264
|
-
# We have three tasks to run in order, per amphora
|
265
|
-
amp_0_subflow = linear_flow.Flow('VRRP-amp-0-update-subflow')
|
266
|
-
|
267
|
-
amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface(
|
268
|
-
name=sf_name + '-0-' + constants.AMP_UPDATE_VRRP_INTF,
|
269
|
-
requires=constants.AMPHORAE,
|
270
|
-
inject={constants.AMPHORA_INDEX: 0,
|
271
|
-
constants.TIMEOUT_DICT: timeout_dict},
|
272
|
-
provides=constants.AMP_VRRP_INT))
|
273
|
-
|
274
|
-
amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPUpdate(
|
275
|
-
name=sf_name + '-0-' + constants.AMP_VRRP_UPDATE,
|
276
|
-
requires=(constants.LOADBALANCER_ID,
|
277
|
-
constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE,
|
278
|
-
constants.AMP_VRRP_INT),
|
279
|
-
inject={constants.AMPHORA_INDEX: 0,
|
280
|
-
constants.TIMEOUT_DICT: timeout_dict}))
|
281
|
-
|
282
|
-
amp_0_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart(
|
283
|
-
name=sf_name + '-0-' + constants.AMP_VRRP_START,
|
284
|
-
requires=constants.AMPHORAE,
|
285
|
-
inject={constants.AMPHORA_INDEX: 0,
|
286
|
-
constants.TIMEOUT_DICT: timeout_dict}))
|
287
|
-
|
288
|
-
amp_1_subflow = linear_flow.Flow('VRRP-amp-1-update-subflow')
|
289
|
-
|
290
|
-
amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexUpdateVRRPInterface(
|
291
|
-
name=sf_name + '-1-' + constants.AMP_UPDATE_VRRP_INTF,
|
292
|
-
requires=constants.AMPHORAE,
|
293
|
-
inject={constants.AMPHORA_INDEX: 1,
|
294
|
-
constants.TIMEOUT_DICT: timeout_dict},
|
295
|
-
provides=constants.AMP_VRRP_INT))
|
296
|
-
|
297
|
-
amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPUpdate(
|
298
|
-
name=sf_name + '-1-' + constants.AMP_VRRP_UPDATE,
|
299
|
-
requires=(constants.LOADBALANCER_ID,
|
300
|
-
constants.AMPHORAE_NETWORK_CONFIG, constants.AMPHORAE,
|
301
|
-
constants.AMP_VRRP_INT),
|
302
|
-
inject={constants.AMPHORA_INDEX: 1,
|
303
|
-
constants.TIMEOUT_DICT: timeout_dict}))
|
304
|
-
amp_1_subflow.add(amphora_driver_tasks.AmphoraIndexVRRPStart(
|
305
|
-
name=sf_name + '-1-' + constants.AMP_VRRP_START,
|
306
|
-
requires=constants.AMPHORAE,
|
307
|
-
inject={constants.AMPHORA_INDEX: 1,
|
308
|
-
constants.TIMEOUT_DICT: timeout_dict}))
|
309
|
-
|
310
|
-
update_amps_subflow.add(amp_0_subflow)
|
311
|
-
update_amps_subflow.add(amp_1_subflow)
|
312
|
-
|
313
|
-
vrrp_subflow.add(update_amps_subflow)
|
314
|
-
|
315
|
-
return vrrp_subflow
|
316
|
-
|
317
|
-
def cert_rotate_amphora_flow(self):
|
318
|
-
"""Implement rotation for amphora's cert.
|
319
|
-
|
320
|
-
1. Create a new certificate
|
321
|
-
2. Upload the cert to amphora
|
322
|
-
3. update the newly created certificate info to amphora
|
323
|
-
4. update the cert_busy flag to be false after rotation
|
324
|
-
|
325
|
-
:returns: The flow for updating an amphora
|
326
|
-
"""
|
327
|
-
rotated_amphora_flow = linear_flow.Flow(
|
328
|
-
constants.CERT_ROTATE_AMPHORA_FLOW)
|
329
|
-
|
330
|
-
rotated_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
|
331
|
-
requires=constants.AMPHORA))
|
332
|
-
|
333
|
-
# create a new certificate, the returned value is the newly created
|
334
|
-
# certificate
|
335
|
-
rotated_amphora_flow.add(cert_task.GenerateServerPEMTask(
|
336
|
-
provides=constants.SERVER_PEM))
|
337
|
-
|
338
|
-
# update it in amphora task
|
339
|
-
rotated_amphora_flow.add(amphora_driver_tasks.AmphoraCertUpload(
|
340
|
-
requires=(constants.AMPHORA, constants.SERVER_PEM)))
|
341
|
-
|
342
|
-
# update the newly created certificate info to amphora
|
343
|
-
rotated_amphora_flow.add(database_tasks.UpdateAmphoraDBCertExpiration(
|
344
|
-
requires=(constants.AMPHORA_ID, constants.SERVER_PEM)))
|
345
|
-
|
346
|
-
# update the cert_busy flag to be false after rotation
|
347
|
-
rotated_amphora_flow.add(database_tasks.UpdateAmphoraCertBusyToFalse(
|
348
|
-
requires=constants.AMPHORA))
|
349
|
-
|
350
|
-
return rotated_amphora_flow
|
351
|
-
|
352
|
-
def update_amphora_config_flow(self):
|
353
|
-
"""Creates a flow to update the amphora agent configuration.
|
354
|
-
|
355
|
-
:returns: The flow for updating an amphora
|
356
|
-
"""
|
357
|
-
update_amphora_flow = linear_flow.Flow(
|
358
|
-
constants.UPDATE_AMPHORA_CONFIG_FLOW)
|
359
|
-
|
360
|
-
update_amphora_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
|
361
|
-
requires=constants.AMPHORA))
|
362
|
-
|
363
|
-
update_amphora_flow.add(amphora_driver_tasks.AmphoraConfigUpdate(
|
364
|
-
requires=(constants.AMPHORA, constants.FLAVOR)))
|
365
|
-
|
366
|
-
return update_amphora_flow
|
367
|
-
|
368
|
-
def get_amphora_for_lb_failover_subflow(
        self, prefix, role=constants.ROLE_STANDALONE,
        failed_amp_vrrp_port_id=None, is_vrrp_ipv6=False):
    """Creates a new amphora that will be used in a failover flow.

    Boots (or allocates) a replacement amphora, creates and attaches its
    VIP base (VRRP) port, updates the database record, and plugs the VIP
    and member networks so the amphora is ready to take over.

    :requires: loadbalancer_id, flavor, vip, vip_sg_id, loadbalancer
    :provides: amphora_id, amphora
    :param prefix: The flow name prefix to use on the flow and tasks.
    :param role: The role this amphora will have in the topology.
    :param failed_amp_vrrp_port_id: The base port ID of the failed amp.
    :param is_vrrp_ipv6: True if the base port IP is IPv6.
    :return: A Taskflow sub-flow that will create the amphora.
    """

    sf_name = prefix + '-' + constants.CREATE_AMP_FOR_FAILOVER_SUBFLOW

    amp_for_failover_flow = linear_flow.Flow(sf_name)

    # Try to allocate or boot an amphora instance (unconfigured)
    amp_for_failover_flow.add(self.get_amphora_for_lb_subflow(
        prefix=prefix + '-' + constants.FAILOVER_LOADBALANCER_FLOW,
        role=role))

    # Create the VIP base (aka VRRP) port for the amphora.
    amp_for_failover_flow.add(network_tasks.CreateVIPBasePort(
        name=prefix + '-' + constants.CREATE_VIP_BASE_PORT,
        requires=(constants.VIP, constants.VIP_SG_ID,
                  constants.AMPHORA_ID),
        provides=constants.BASE_PORT))

    # Attach the VIP base (aka VRRP) port to the amphora.
    # rebind maps the generic PORT argument onto the BASE_PORT result
    # produced by the task above.
    amp_for_failover_flow.add(compute_tasks.AttachPort(
        name=prefix + '-' + constants.ATTACH_PORT,
        requires=(constants.AMPHORA, constants.PORT),
        rebind={constants.PORT: constants.BASE_PORT}))

    # Update the amphora database record with the VIP base port info.
    amp_for_failover_flow.add(database_tasks.UpdateAmpFailoverDetails(
        name=prefix + '-' + constants.UPDATE_AMP_FAILOVER_DETAILS,
        requires=(constants.AMPHORA, constants.VIP, constants.BASE_PORT)))

    # Make sure the amphora in the flow storage is up to date
    # or the vrrp_ip will be empty
    amp_for_failover_flow.add(database_tasks.ReloadAmphora(
        name=prefix + '-' + constants.RELOAD_AMPHORA,
        requires=constants.AMPHORA_ID, provides=constants.AMPHORA))

    # Update the amphora networking for the plugged VIP port
    amp_for_failover_flow.add(network_tasks.GetAmphoraNetworkConfigsByID(
        name=prefix + '-' + constants.GET_AMPHORA_NETWORK_CONFIGS_BY_ID,
        requires=(constants.LOADBALANCER_ID, constants.AMPHORA_ID),
        provides=constants.AMPHORAE_NETWORK_CONFIG))

    # Disable the base (vrrp) port on the failed amphora
    # This prevents a DAD failure when bringing up the new amphora.
    # Keepalived will handle this for act/stdby.
    # NOTE(review): only done for IPv6 standalone topologies — IPv6
    # duplicate address detection is the failure mode being avoided.
    if (role == constants.ROLE_STANDALONE and failed_amp_vrrp_port_id and
            is_vrrp_ipv6):
        amp_for_failover_flow.add(network_tasks.AdminDownPort(
            name=prefix + '-' + constants.ADMIN_DOWN_PORT,
            inject={constants.PORT_ID: failed_amp_vrrp_port_id}))

    # Configure the amphora networking after the VIP plug.
    amp_for_failover_flow.add(amphora_driver_tasks.AmphoraPostVIPPlug(
        name=prefix + '-' + constants.AMPHORA_POST_VIP_PLUG,
        requires=(constants.AMPHORA, constants.LOADBALANCER,
                  constants.AMPHORAE_NETWORK_CONFIG)))

    # Plug member ports
    amp_for_failover_flow.add(network_tasks.CalculateAmphoraDelta(
        name=prefix + '-' + constants.CALCULATE_AMPHORA_DELTA,
        requires=(constants.LOADBALANCER, constants.AMPHORA,
                  constants.AVAILABILITY_ZONE),
        provides=constants.DELTA))

    amp_for_failover_flow.add(network_tasks.HandleNetworkDelta(
        name=prefix + '-' + constants.HANDLE_NETWORK_DELTA,
        requires=(constants.AMPHORA, constants.DELTA),
        provides=constants.UPDATED_PORTS))

    # Notify the amphora agent about the newly plugged member ports.
    amp_for_failover_flow.add(amphora_driver_tasks.AmphoraePostNetworkPlug(
        name=prefix + '-' + constants.AMPHORAE_POST_NETWORK_PLUG,
        requires=(constants.LOADBALANCER, constants.UPDATED_PORTS)))

    return amp_for_failover_flow
|
452
|
-
|
453
|
-
def get_failover_amphora_flow(self, failed_amphora, lb_amp_count):
    """Get a Taskflow flow to failover an amphora.

    1. Build a replacement amphora.
    2. Delete the old amphora.
    3. Update the amphorae listener configurations.
    4. Update the VRRP configurations if needed.

    :param failed_amphora: The amphora object to failover.
    :param lb_amp_count: The number of amphora on this load balancer.
    :returns: The flow that will provide the failover.
    """
    failover_amp_flow = linear_flow.Flow(
        constants.FAILOVER_AMPHORA_FLOW)

    # Revert amphora to status ERROR if this flow goes wrong
    failover_amp_flow.add(lifecycle_tasks.AmphoraToErrorOnRevertTask(
        requires=constants.AMPHORA,
        inject={constants.AMPHORA: failed_amphora}))

    # Derive a human-readable role label for the log message below.
    if failed_amphora.role in (constants.ROLE_MASTER,
                               constants.ROLE_BACKUP):
        amp_role = 'master_or_backup'
    elif failed_amphora.role == constants.ROLE_STANDALONE:
        amp_role = 'standalone'
    else:
        amp_role = 'undefined'
    LOG.info("Performing failover for amphora: %s",
             {"id": failed_amphora.id,
              "load_balancer_id": failed_amphora.load_balancer_id,
              "lb_network_ip": failed_amphora.lb_network_ip,
              "compute_id": failed_amphora.compute_id,
              "role": amp_role})

    # Take the failed amphora out of rotation in the database and stop
    # health monitoring from triggering another failover mid-flow.
    failover_amp_flow.add(database_tasks.MarkAmphoraPendingDeleteInDB(
        requires=constants.AMPHORA,
        inject={constants.AMPHORA: failed_amphora}))

    failover_amp_flow.add(database_tasks.MarkAmphoraHealthBusy(
        requires=constants.AMPHORA,
        inject={constants.AMPHORA: failed_amphora}))

    failover_amp_flow.add(network_tasks.GetVIPSecurityGroupID(
        requires=constants.LOADBALANCER_ID,
        provides=constants.VIP_SG_ID))

    is_vrrp_ipv6 = False
    if failed_amphora.load_balancer_id:
        if failed_amphora.vrrp_ip:
            is_vrrp_ipv6 = utils.is_ipv6(failed_amphora.vrrp_ip)

        # Get a replacement amphora and plug all of the networking.
        #
        # Do this early as the compute services have been observed to be
        # unreliable. The community decided the chance that deleting first
        # would open resources for an instance is less likely than the
        # compute service failing to boot an instance for other reasons.

        # TODO(johnsom) Move this back out to run for spares after
        # delete amphora API is available.
        failover_amp_flow.add(self.get_amphora_for_lb_failover_subflow(
            prefix=constants.FAILOVER_LOADBALANCER_FLOW,
            role=failed_amphora.role,
            failed_amp_vrrp_port_id=failed_amphora.vrrp_port_id,
            is_vrrp_ipv6=is_vrrp_ipv6))

    # Delete the failed amphora, retrying per the configured policy.
    failover_amp_flow.add(
        self.get_delete_amphora_flow(
            failed_amphora,
            retry_attempts=CONF.controller_worker.amphora_delete_retries,
            retry_interval=(
                CONF.controller_worker.amphora_delete_retry_interval)))
    failover_amp_flow.add(
        database_tasks.DisableAmphoraHealthMonitoring(
            requires=constants.AMPHORA,
            inject={constants.AMPHORA: failed_amphora}))

    if not failed_amphora.load_balancer_id:
        # This is an unallocated amphora (bogus), we are done.
        return failover_amp_flow

    failover_amp_flow.add(database_tasks.GetLoadBalancer(
        requires=constants.LOADBALANCER_ID,
        inject={constants.LOADBALANCER_ID:
                failed_amphora.load_balancer_id},
        provides=constants.LOADBALANCER))

    failover_amp_flow.add(database_tasks.GetAmphoraeFromLoadbalancer(
        name=constants.GET_AMPHORAE_FROM_LB,
        requires=constants.LOADBALANCER_ID,
        inject={constants.LOADBALANCER_ID:
                failed_amphora.load_balancer_id},
        provides=constants.AMPHORAE))

    # Setup timeouts for our requests to the amphorae
    timeout_dict = {
        constants.CONN_MAX_RETRIES:
            CONF.haproxy_amphora.active_connection_max_retries,
        constants.CONN_RETRY_INTERVAL:
            CONF.haproxy_amphora.active_connection_retry_interval}

    # Listeners update needs to be run on all amphora to update
    # their peer configurations. So parallelize this with an
    # unordered subflow.
    update_amps_subflow = unordered_flow.Flow(
        constants.UPDATE_AMPS_SUBFLOW)

    for amp_index in range(0, lb_amp_count):
        update_amps_subflow.add(
            amphora_driver_tasks.AmphoraIndexListenerUpdate(
                name=str(amp_index) + '-' + constants.AMP_LISTENER_UPDATE,
                requires=(constants.LOADBALANCER, constants.AMPHORAE),
                inject={constants.AMPHORA_INDEX: amp_index,
                        constants.TIMEOUT_DICT: timeout_dict}))

    failover_amp_flow.add(update_amps_subflow)

    # Configure and enable keepalived in the amphora
    # NOTE(review): lb_amp_count == 2 implies an active/standby
    # topology — confirm against the caller.
    if lb_amp_count == 2:
        failover_amp_flow.add(
            self.get_vrrp_subflow(constants.GET_VRRP_SUBFLOW,
                                  timeout_dict, create_vrrp_group=False))

        # Reload the listener. This needs to be done here because
        # it will create the required haproxy check scripts for
        # the VRRP deployed above.
        # A "U" or newer amphora-agent will remove the need for this
        # task here.
        # TODO(johnsom) Remove this in the "W" cycle
        reload_listener_subflow = unordered_flow.Flow(
            constants.AMPHORA_LISTENER_RELOAD_SUBFLOW)

        for amp_index in range(0, lb_amp_count):
            reload_listener_subflow.add(
                amphora_driver_tasks.AmphoraIndexListenersReload(
                    name=(str(amp_index) + '-' +
                          constants.AMPHORA_RELOAD_LISTENER),
                    requires=(constants.LOADBALANCER, constants.AMPHORAE),
                    inject={constants.AMPHORA_INDEX: amp_index,
                            constants.TIMEOUT_DICT: timeout_dict}))

        failover_amp_flow.add(reload_listener_subflow)

    # Remove any extraneous ports
    # Note: Nova sometimes fails to delete ports attached to an instance.
    #       For example, if you create an LB with a listener, then
    #       'openstack server delete' the amphora, you will see the vrrp
    #       port attached to that instance will remain after the instance
    #       is deleted.
    # TODO(johnsom) Fix this as part of
    #               https://storyboard.openstack.org/#!/story/2007077

    # Mark LB ACTIVE
    failover_amp_flow.add(
        database_tasks.MarkLBActiveInDB(mark_subobjects=True,
                                        requires=constants.LOADBALANCER))

    return failover_amp_flow
|