octavia 12.0.0.0rc2__py3-none-any.whl → 13.0.0.0rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- octavia/amphorae/backends/agent/api_server/osutils.py +1 -0
- octavia/amphorae/backends/agent/api_server/plug.py +21 -7
- octavia/amphorae/backends/agent/api_server/templates/amphora-netns.systemd.j2 +2 -2
- octavia/amphorae/backends/agent/api_server/util.py +21 -0
- octavia/amphorae/backends/health_daemon/health_daemon.py +9 -3
- octavia/amphorae/backends/health_daemon/health_sender.py +2 -0
- octavia/amphorae/backends/utils/interface.py +14 -6
- octavia/amphorae/backends/utils/interface_file.py +6 -3
- octavia/amphorae/backends/utils/keepalivedlvs_query.py +8 -9
- octavia/amphorae/drivers/driver_base.py +1 -2
- octavia/amphorae/drivers/haproxy/rest_api_driver.py +11 -25
- octavia/amphorae/drivers/health/heartbeat_udp.py +34 -24
- octavia/amphorae/drivers/keepalived/jinja/jinja_cfg.py +3 -12
- octavia/amphorae/drivers/noop_driver/driver.py +3 -5
- octavia/api/common/pagination.py +4 -4
- octavia/api/drivers/amphora_driver/v2/driver.py +11 -5
- octavia/api/drivers/driver_agent/driver_get.py +22 -14
- octavia/api/drivers/driver_agent/driver_updater.py +8 -4
- octavia/api/drivers/utils.py +4 -2
- octavia/api/healthcheck/healthcheck_plugins.py +4 -2
- octavia/api/root_controller.py +4 -1
- octavia/api/v2/controllers/amphora.py +35 -38
- octavia/api/v2/controllers/availability_zone_profiles.py +43 -33
- octavia/api/v2/controllers/availability_zones.py +22 -18
- octavia/api/v2/controllers/flavor_profiles.py +37 -28
- octavia/api/v2/controllers/flavors.py +19 -15
- octavia/api/v2/controllers/health_monitor.py +44 -33
- octavia/api/v2/controllers/l7policy.py +52 -40
- octavia/api/v2/controllers/l7rule.py +68 -55
- octavia/api/v2/controllers/listener.py +88 -61
- octavia/api/v2/controllers/load_balancer.py +52 -34
- octavia/api/v2/controllers/member.py +63 -52
- octavia/api/v2/controllers/pool.py +55 -42
- octavia/api/v2/controllers/quotas.py +5 -3
- octavia/api/v2/types/listener.py +15 -0
- octavia/cmd/octavia_worker.py +0 -3
- octavia/cmd/status.py +1 -4
- octavia/common/clients.py +25 -45
- octavia/common/config.py +64 -22
- octavia/common/constants.py +3 -2
- octavia/common/data_models.py +7 -1
- octavia/common/jinja/haproxy/combined_listeners/jinja_cfg.py +12 -1
- octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 +5 -2
- octavia/common/jinja/lvs/jinja_cfg.py +4 -2
- octavia/common/keystone.py +58 -5
- octavia/common/validate.py +35 -0
- octavia/compute/drivers/noop_driver/driver.py +6 -0
- octavia/controller/healthmanager/health_manager.py +3 -6
- octavia/controller/housekeeping/house_keeping.py +36 -37
- octavia/controller/worker/amphora_rate_limit.py +5 -4
- octavia/controller/worker/task_utils.py +57 -41
- octavia/controller/worker/v2/controller_worker.py +160 -103
- octavia/controller/worker/v2/flows/listener_flows.py +3 -0
- octavia/controller/worker/v2/flows/load_balancer_flows.py +9 -14
- octavia/controller/worker/v2/tasks/amphora_driver_tasks.py +152 -91
- octavia/controller/worker/v2/tasks/compute_tasks.py +4 -2
- octavia/controller/worker/v2/tasks/database_tasks.py +542 -400
- octavia/controller/worker/v2/tasks/network_tasks.py +119 -79
- octavia/db/api.py +26 -23
- octavia/db/base_models.py +2 -2
- octavia/db/healthcheck.py +2 -1
- octavia/db/migration/alembic_migrations/versions/632152d2d32e_add_http_strict_transport_security_.py +42 -0
- octavia/db/models.py +12 -2
- octavia/db/prepare.py +2 -0
- octavia/db/repositories.py +462 -482
- octavia/hacking/checks.py +1 -1
- octavia/network/base.py +0 -14
- octavia/network/drivers/neutron/allowed_address_pairs.py +92 -135
- octavia/network/drivers/neutron/base.py +65 -77
- octavia/network/drivers/neutron/utils.py +69 -85
- octavia/network/drivers/noop_driver/driver.py +0 -7
- octavia/statistics/drivers/update_db.py +10 -10
- octavia/tests/common/constants.py +91 -84
- octavia/tests/common/sample_data_models.py +13 -1
- octavia/tests/fixtures.py +32 -0
- octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py +9 -10
- octavia/tests/functional/api/drivers/driver_agent/test_driver_agent.py +260 -15
- octavia/tests/functional/api/test_root_controller.py +3 -28
- octavia/tests/functional/api/v2/base.py +5 -3
- octavia/tests/functional/api/v2/test_amphora.py +18 -5
- octavia/tests/functional/api/v2/test_availability_zone_profiles.py +1 -0
- octavia/tests/functional/api/v2/test_listener.py +51 -19
- octavia/tests/functional/api/v2/test_load_balancer.py +10 -1
- octavia/tests/functional/db/base.py +31 -16
- octavia/tests/functional/db/test_models.py +27 -28
- octavia/tests/functional/db/test_repositories.py +407 -50
- octavia/tests/unit/amphorae/backends/agent/api_server/test_amphora_info.py +2 -0
- octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py +1 -1
- octavia/tests/unit/amphorae/backends/agent/api_server/test_plug.py +54 -6
- octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py +35 -0
- octavia/tests/unit/amphorae/backends/health_daemon/test_health_daemon.py +8 -0
- octavia/tests/unit/amphorae/backends/health_daemon/test_health_sender.py +18 -0
- octavia/tests/unit/amphorae/backends/utils/test_interface.py +81 -0
- octavia/tests/unit/amphorae/backends/utils/test_interface_file.py +2 -0
- octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py +129 -5
- octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py +42 -20
- octavia/tests/unit/amphorae/drivers/health/test_heartbeat_udp.py +18 -20
- octavia/tests/unit/amphorae/drivers/keepalived/jinja/test_jinja_cfg.py +4 -4
- octavia/tests/unit/amphorae/drivers/noop_driver/test_driver.py +4 -1
- octavia/tests/unit/api/drivers/driver_agent/test_driver_get.py +3 -3
- octavia/tests/unit/api/drivers/driver_agent/test_driver_updater.py +11 -13
- octavia/tests/unit/base.py +6 -0
- octavia/tests/unit/cmd/test_interface.py +2 -2
- octavia/tests/unit/cmd/test_status.py +2 -2
- octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py +152 -1
- octavia/tests/unit/common/sample_configs/sample_configs_combined.py +10 -3
- octavia/tests/unit/common/test_clients.py +0 -39
- octavia/tests/unit/common/test_keystone.py +54 -0
- octavia/tests/unit/common/test_validate.py +67 -0
- octavia/tests/unit/controller/healthmanager/test_health_manager.py +8 -22
- octavia/tests/unit/controller/housekeeping/test_house_keeping.py +3 -64
- octavia/tests/unit/controller/worker/test_amphora_rate_limit.py +1 -1
- octavia/tests/unit/controller/worker/test_task_utils.py +44 -24
- octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py +0 -1
- octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py +49 -26
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py +399 -196
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py +37 -64
- octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py +3 -14
- octavia/tests/unit/controller/worker/v2/test_controller_worker.py +2 -2
- octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py +456 -561
- octavia/tests/unit/network/drivers/neutron/test_base.py +181 -194
- octavia/tests/unit/network/drivers/neutron/test_utils.py +14 -30
- octavia/tests/unit/statistics/drivers/test_update_db.py +7 -5
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/README.rst +1 -1
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/AUTHORS +4 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/METADATA +4 -4
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/RECORD +141 -189
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/entry_points.txt +1 -2
- octavia-13.0.0.0rc1.dist-info/pbr.json +1 -0
- octavia/api/drivers/amphora_driver/v1/__init__.py +0 -11
- octavia/api/drivers/amphora_driver/v1/driver.py +0 -547
- octavia/controller/queue/v1/__init__.py +0 -11
- octavia/controller/queue/v1/consumer.py +0 -64
- octavia/controller/queue/v1/endpoints.py +0 -160
- octavia/controller/worker/v1/__init__.py +0 -11
- octavia/controller/worker/v1/controller_worker.py +0 -1157
- octavia/controller/worker/v1/flows/__init__.py +0 -11
- octavia/controller/worker/v1/flows/amphora_flows.py +0 -610
- octavia/controller/worker/v1/flows/health_monitor_flows.py +0 -105
- octavia/controller/worker/v1/flows/l7policy_flows.py +0 -94
- octavia/controller/worker/v1/flows/l7rule_flows.py +0 -100
- octavia/controller/worker/v1/flows/listener_flows.py +0 -128
- octavia/controller/worker/v1/flows/load_balancer_flows.py +0 -692
- octavia/controller/worker/v1/flows/member_flows.py +0 -230
- octavia/controller/worker/v1/flows/pool_flows.py +0 -127
- octavia/controller/worker/v1/tasks/__init__.py +0 -11
- octavia/controller/worker/v1/tasks/amphora_driver_tasks.py +0 -453
- octavia/controller/worker/v1/tasks/cert_task.py +0 -51
- octavia/controller/worker/v1/tasks/compute_tasks.py +0 -335
- octavia/controller/worker/v1/tasks/database_tasks.py +0 -2756
- octavia/controller/worker/v1/tasks/lifecycle_tasks.py +0 -173
- octavia/controller/worker/v1/tasks/model_tasks.py +0 -41
- octavia/controller/worker/v1/tasks/network_tasks.py +0 -970
- octavia/controller/worker/v1/tasks/retry_tasks.py +0 -74
- octavia/tests/unit/api/drivers/amphora_driver/v1/__init__.py +0 -11
- octavia/tests/unit/api/drivers/amphora_driver/v1/test_driver.py +0 -824
- octavia/tests/unit/controller/queue/v1/__init__.py +0 -11
- octavia/tests/unit/controller/queue/v1/test_consumer.py +0 -61
- octavia/tests/unit/controller/queue/v1/test_endpoints.py +0 -189
- octavia/tests/unit/controller/worker/v1/__init__.py +0 -11
- octavia/tests/unit/controller/worker/v1/flows/__init__.py +0 -11
- octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py +0 -474
- octavia/tests/unit/controller/worker/v1/flows/test_health_monitor_flows.py +0 -72
- octavia/tests/unit/controller/worker/v1/flows/test_l7policy_flows.py +0 -67
- octavia/tests/unit/controller/worker/v1/flows/test_l7rule_flows.py +0 -67
- octavia/tests/unit/controller/worker/v1/flows/test_listener_flows.py +0 -91
- octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py +0 -431
- octavia/tests/unit/controller/worker/v1/flows/test_member_flows.py +0 -106
- octavia/tests/unit/controller/worker/v1/flows/test_pool_flows.py +0 -77
- octavia/tests/unit/controller/worker/v1/tasks/__init__.py +0 -11
- octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py +0 -792
- octavia/tests/unit/controller/worker/v1/tasks/test_cert_task.py +0 -46
- octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py +0 -634
- octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py +0 -2615
- octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks_quota.py +0 -415
- octavia/tests/unit/controller/worker/v1/tasks/test_lifecycle_tasks.py +0 -401
- octavia/tests/unit/controller/worker/v1/tasks/test_model_tasks.py +0 -44
- octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py +0 -1788
- octavia/tests/unit/controller/worker/v1/tasks/test_retry_tasks.py +0 -47
- octavia/tests/unit/controller/worker/v1/test_controller_worker.py +0 -2096
- octavia-12.0.0.0rc2.dist-info/pbr.json +0 -1
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/LICENSE +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/README.rst +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/diskimage-create.sh +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/image-tests.sh +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/requirements.txt +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/test-requirements.txt +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/tox.ini +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/version.txt +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/scripts/octavia-wsgi +0 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/LICENSE +0 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/WHEEL +0 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/top_level.txt +0 -0
@@ -21,7 +21,6 @@ keepalived_lvs = octavia.amphorae.backends.agent.api_server.keepalivedlvs:Keepal
|
|
21
21
|
|
22
22
|
[octavia.api.drivers]
|
23
23
|
amphora = octavia.api.drivers.amphora_driver.v2.driver:AmphoraProviderDriver
|
24
|
-
amphorav1 = octavia.api.drivers.amphora_driver.v1.driver:AmphoraProviderDriver
|
25
24
|
amphorav2 = octavia.api.drivers.amphora_driver.v2.driver:AmphoraProviderDriver
|
26
25
|
noop_driver = octavia.api.drivers.noop_driver.driver:NoopProviderDriver
|
27
26
|
noop_driver-alt = octavia.api.drivers.noop_driver.driver:NoopProviderDriver
|
@@ -59,7 +58,7 @@ containers_driver = octavia.network.drivers.neutron.containers:ContainersDriver
|
|
59
58
|
network_noop_driver = octavia.network.drivers.noop_driver.driver:NoopNetworkDriver
|
60
59
|
|
61
60
|
[octavia.plugins]
|
62
|
-
hot_plug_plugin = octavia.controller.worker.
|
61
|
+
hot_plug_plugin = octavia.controller.worker.v2.controller_worker:ControllerWorker
|
63
62
|
|
64
63
|
[octavia.statistics.drivers]
|
65
64
|
stats_db = octavia.statistics.drivers.update_db:StatsUpdateDb
|
@@ -0,0 +1 @@
|
|
1
|
+
{"git_version": "d7bba6c5", "is_release": true}
|
@@ -1,11 +0,0 @@
|
|
1
|
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
2
|
-
# not use this file except in compliance with the License. You may obtain
|
3
|
-
# a copy of the License at
|
4
|
-
#
|
5
|
-
# http://www.apache.org/licenses/LICENSE-2.0
|
6
|
-
#
|
7
|
-
# Unless required by applicable law or agreed to in writing, software
|
8
|
-
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
9
|
-
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
10
|
-
# License for the specific language governing permissions and limitations
|
11
|
-
# under the License.
|
@@ -1,547 +0,0 @@
|
|
1
|
-
# Copyright 2018 Rackspace, US Inc.
|
2
|
-
#
|
3
|
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
4
|
-
# not use this file except in compliance with the License. You may obtain
|
5
|
-
# a copy of the License at
|
6
|
-
#
|
7
|
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
-
#
|
9
|
-
# Unless required by applicable law or agreed to in writing, software
|
10
|
-
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
11
|
-
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
12
|
-
# License for the specific language governing permissions and limitations
|
13
|
-
# under the License.
|
14
|
-
|
15
|
-
from jsonschema import exceptions as js_exceptions
|
16
|
-
from jsonschema import validate
|
17
|
-
|
18
|
-
from oslo_config import cfg
|
19
|
-
from oslo_log import log as logging
|
20
|
-
import oslo_messaging as messaging
|
21
|
-
from stevedore import driver as stevedore_driver
|
22
|
-
|
23
|
-
from octavia_lib.api.drivers import data_models as driver_dm
|
24
|
-
from octavia_lib.api.drivers import exceptions
|
25
|
-
from octavia_lib.api.drivers import provider_base as driver_base
|
26
|
-
from octavia_lib.common import constants as lib_consts
|
27
|
-
|
28
|
-
from octavia.api.drivers.amphora_driver import availability_zone_schema
|
29
|
-
from octavia.api.drivers.amphora_driver import flavor_schema
|
30
|
-
from octavia.api.drivers import utils as driver_utils
|
31
|
-
from octavia.common import constants as consts
|
32
|
-
from octavia.common import data_models
|
33
|
-
from octavia.common import rpc
|
34
|
-
from octavia.common import utils
|
35
|
-
from octavia.db import api as db_apis
|
36
|
-
from octavia.db import repositories
|
37
|
-
from octavia.network import base as network_base
|
38
|
-
|
39
|
-
CONF = cfg.CONF
|
40
|
-
CONF.import_group('oslo_messaging', 'octavia.common.config')
|
41
|
-
LOG = logging.getLogger(__name__)
|
42
|
-
AMPHORA_SUPPORTED_LB_ALGORITHMS = [
|
43
|
-
consts.LB_ALGORITHM_ROUND_ROBIN,
|
44
|
-
consts.LB_ALGORITHM_SOURCE_IP,
|
45
|
-
consts.LB_ALGORITHM_LEAST_CONNECTIONS]
|
46
|
-
|
47
|
-
AMPHORA_SUPPORTED_PROTOCOLS = [
|
48
|
-
lib_consts.PROTOCOL_TCP,
|
49
|
-
lib_consts.PROTOCOL_HTTP,
|
50
|
-
lib_consts.PROTOCOL_HTTPS,
|
51
|
-
lib_consts.PROTOCOL_TERMINATED_HTTPS,
|
52
|
-
lib_consts.PROTOCOL_PROXY,
|
53
|
-
lib_consts.PROTOCOL_PROXYV2,
|
54
|
-
lib_consts.PROTOCOL_UDP,
|
55
|
-
lib_consts.PROTOCOL_SCTP,
|
56
|
-
lib_consts.PROTOCOL_PROMETHEUS,
|
57
|
-
]
|
58
|
-
|
59
|
-
VALID_L7POLICY_LISTENER_PROTOCOLS = [
|
60
|
-
lib_consts.PROTOCOL_HTTP,
|
61
|
-
lib_consts.PROTOCOL_TERMINATED_HTTPS
|
62
|
-
]
|
63
|
-
|
64
|
-
|
65
|
-
class AmphoraProviderDriver(driver_base.ProviderDriver):
|
66
|
-
def __init__(self):
|
67
|
-
super().__init__()
|
68
|
-
topic = cfg.CONF.oslo_messaging.topic
|
69
|
-
self.target = messaging.Target(
|
70
|
-
namespace=consts.RPC_NAMESPACE_CONTROLLER_AGENT,
|
71
|
-
topic=topic, version="1.0", fanout=False)
|
72
|
-
self.client = rpc.get_client(self.target)
|
73
|
-
self.repositories = repositories.Repositories()
|
74
|
-
|
75
|
-
def _validate_pool_algorithm(self, pool):
|
76
|
-
if pool.lb_algorithm not in AMPHORA_SUPPORTED_LB_ALGORITHMS:
|
77
|
-
msg = ('Amphora provider does not support %s algorithm.'
|
78
|
-
% pool.lb_algorithm)
|
79
|
-
raise exceptions.UnsupportedOptionError(
|
80
|
-
user_fault_string=msg,
|
81
|
-
operator_fault_string=msg)
|
82
|
-
|
83
|
-
def _validate_listener_protocol(self, listener):
|
84
|
-
if listener.protocol not in AMPHORA_SUPPORTED_PROTOCOLS:
|
85
|
-
msg = ('Amphora provider does not support %s protocol. '
|
86
|
-
'Supported: %s'
|
87
|
-
% (listener.protocol,
|
88
|
-
", ".join(AMPHORA_SUPPORTED_PROTOCOLS)))
|
89
|
-
raise exceptions.UnsupportedOptionError(
|
90
|
-
user_fault_string=msg,
|
91
|
-
operator_fault_string=msg)
|
92
|
-
|
93
|
-
def _validate_alpn_protocols(self, obj):
|
94
|
-
if not obj.alpn_protocols:
|
95
|
-
return
|
96
|
-
supported = consts.AMPHORA_SUPPORTED_ALPN_PROTOCOLS
|
97
|
-
not_supported = set(obj.alpn_protocols) - set(supported)
|
98
|
-
if not_supported:
|
99
|
-
msg = ('Amphora provider does not support %s ALPN protocol(s). '
|
100
|
-
'Supported: %s'
|
101
|
-
% (", ".join(not_supported), ", ".join(supported)))
|
102
|
-
raise exceptions.UnsupportedOptionError(
|
103
|
-
user_fault_string=msg,
|
104
|
-
operator_fault_string=msg)
|
105
|
-
|
106
|
-
# Load Balancer
|
107
|
-
def create_vip_port(self, loadbalancer_id, project_id, vip_dictionary,
|
108
|
-
additional_vip_dicts=None):
|
109
|
-
if additional_vip_dicts:
|
110
|
-
msg = 'Amphora v1 driver does not support additional_vips.'
|
111
|
-
raise exceptions.UnsupportedOptionError(
|
112
|
-
user_fault_string=msg,
|
113
|
-
operator_fault_string=msg)
|
114
|
-
vip_obj = driver_utils.provider_vip_dict_to_vip_obj(vip_dictionary)
|
115
|
-
lb_obj = data_models.LoadBalancer(id=loadbalancer_id,
|
116
|
-
project_id=project_id, vip=vip_obj)
|
117
|
-
|
118
|
-
network_driver = utils.get_network_driver()
|
119
|
-
vip_network = network_driver.get_network(
|
120
|
-
vip_dictionary[lib_consts.VIP_NETWORK_ID])
|
121
|
-
if not vip_network.port_security_enabled:
|
122
|
-
message = "Port security must be enabled on the VIP network."
|
123
|
-
raise exceptions.DriverError(user_fault_string=message,
|
124
|
-
operator_fault_string=message)
|
125
|
-
|
126
|
-
try:
|
127
|
-
# allocated_vip returns (vip, add_vips), skipping the 2nd element
|
128
|
-
# as amphorav1 doesn't support add_vips
|
129
|
-
vip = network_driver.allocate_vip(lb_obj)[0]
|
130
|
-
except network_base.AllocateVIPException as e:
|
131
|
-
message = str(e)
|
132
|
-
if getattr(e, 'orig_msg', None) is not None:
|
133
|
-
message = e.orig_msg
|
134
|
-
raise exceptions.DriverError(user_fault_string=message,
|
135
|
-
operator_fault_string=message)
|
136
|
-
|
137
|
-
LOG.info('Amphora provider created VIP port %s for load balancer %s.',
|
138
|
-
vip.port_id, loadbalancer_id)
|
139
|
-
return driver_utils.vip_dict_to_provider_dict(vip.to_dict()), []
|
140
|
-
|
141
|
-
# TODO(johnsom) convert this to octavia_lib constant flavor
|
142
|
-
# once octavia is transitioned to use octavia_lib
|
143
|
-
def loadbalancer_create(self, loadbalancer):
|
144
|
-
if loadbalancer.flavor == driver_dm.Unset:
|
145
|
-
loadbalancer.flavor = None
|
146
|
-
if loadbalancer.availability_zone == driver_dm.Unset:
|
147
|
-
loadbalancer.availability_zone = None
|
148
|
-
payload = {consts.LOAD_BALANCER_ID: loadbalancer.loadbalancer_id,
|
149
|
-
consts.FLAVOR: loadbalancer.flavor,
|
150
|
-
consts.AVAILABILITY_ZONE: loadbalancer.availability_zone}
|
151
|
-
self.client.cast({}, 'create_load_balancer', **payload)
|
152
|
-
|
153
|
-
def loadbalancer_delete(self, loadbalancer, cascade=False):
|
154
|
-
loadbalancer_id = loadbalancer.loadbalancer_id
|
155
|
-
payload = {consts.LOAD_BALANCER_ID: loadbalancer_id,
|
156
|
-
'cascade': cascade}
|
157
|
-
self.client.cast({}, 'delete_load_balancer', **payload)
|
158
|
-
|
159
|
-
def loadbalancer_failover(self, loadbalancer_id):
|
160
|
-
payload = {consts.LOAD_BALANCER_ID: loadbalancer_id}
|
161
|
-
self.client.cast({}, 'failover_load_balancer', **payload)
|
162
|
-
|
163
|
-
def loadbalancer_update(self, old_loadbalancer, new_loadbalancer):
|
164
|
-
# Adapt the provider data model to the queue schema
|
165
|
-
lb_dict = new_loadbalancer.to_dict()
|
166
|
-
if 'admin_state_up' in lb_dict:
|
167
|
-
lb_dict['enabled'] = lb_dict.pop('admin_state_up')
|
168
|
-
lb_id = lb_dict.pop('loadbalancer_id')
|
169
|
-
# Put the qos_policy_id back under the vip element the controller
|
170
|
-
# expects
|
171
|
-
vip_qos_policy_id = lb_dict.pop('vip_qos_policy_id', None)
|
172
|
-
if vip_qos_policy_id:
|
173
|
-
vip_dict = {"qos_policy_id": vip_qos_policy_id}
|
174
|
-
lb_dict["vip"] = vip_dict
|
175
|
-
|
176
|
-
payload = {consts.LOAD_BALANCER_ID: lb_id,
|
177
|
-
consts.LOAD_BALANCER_UPDATES: lb_dict}
|
178
|
-
self.client.cast({}, 'update_load_balancer', **payload)
|
179
|
-
|
180
|
-
# Listener
|
181
|
-
def listener_create(self, listener):
|
182
|
-
self._validate_listener_protocol(listener)
|
183
|
-
self._validate_alpn_protocols(listener)
|
184
|
-
payload = {consts.LISTENER_ID: listener.listener_id}
|
185
|
-
self.client.cast({}, 'create_listener', **payload)
|
186
|
-
|
187
|
-
def listener_delete(self, listener):
|
188
|
-
listener_id = listener.listener_id
|
189
|
-
payload = {consts.LISTENER_ID: listener_id}
|
190
|
-
self.client.cast({}, 'delete_listener', **payload)
|
191
|
-
|
192
|
-
def listener_update(self, old_listener, new_listener):
|
193
|
-
self._validate_alpn_protocols(new_listener)
|
194
|
-
listener_dict = new_listener.to_dict()
|
195
|
-
if 'admin_state_up' in listener_dict:
|
196
|
-
listener_dict['enabled'] = listener_dict.pop('admin_state_up')
|
197
|
-
listener_id = listener_dict.pop('listener_id')
|
198
|
-
if 'client_ca_tls_container_ref' in listener_dict:
|
199
|
-
listener_dict['client_ca_tls_container_id'] = listener_dict.pop(
|
200
|
-
'client_ca_tls_container_ref')
|
201
|
-
listener_dict.pop('client_ca_tls_container_data', None)
|
202
|
-
if 'client_crl_container_ref' in listener_dict:
|
203
|
-
listener_dict['client_crl_container_id'] = listener_dict.pop(
|
204
|
-
'client_crl_container_ref')
|
205
|
-
listener_dict.pop('client_crl_container_data', None)
|
206
|
-
|
207
|
-
payload = {consts.LISTENER_ID: listener_id,
|
208
|
-
consts.LISTENER_UPDATES: listener_dict}
|
209
|
-
self.client.cast({}, 'update_listener', **payload)
|
210
|
-
|
211
|
-
# Pool
|
212
|
-
def pool_create(self, pool):
|
213
|
-
self._validate_pool_algorithm(pool)
|
214
|
-
self._validate_alpn_protocols(pool)
|
215
|
-
payload = {consts.POOL_ID: pool.pool_id}
|
216
|
-
self.client.cast({}, 'create_pool', **payload)
|
217
|
-
|
218
|
-
def pool_delete(self, pool):
|
219
|
-
pool_id = pool.pool_id
|
220
|
-
payload = {consts.POOL_ID: pool_id}
|
221
|
-
self.client.cast({}, 'delete_pool', **payload)
|
222
|
-
|
223
|
-
def pool_update(self, old_pool, new_pool):
|
224
|
-
self._validate_alpn_protocols(new_pool)
|
225
|
-
if new_pool.lb_algorithm:
|
226
|
-
self._validate_pool_algorithm(new_pool)
|
227
|
-
pool_dict = new_pool.to_dict()
|
228
|
-
if 'admin_state_up' in pool_dict:
|
229
|
-
pool_dict['enabled'] = pool_dict.pop('admin_state_up')
|
230
|
-
pool_id = pool_dict.pop('pool_id')
|
231
|
-
if 'tls_container_ref' in pool_dict:
|
232
|
-
pool_dict['tls_certificate_id'] = pool_dict.pop(
|
233
|
-
'tls_container_ref')
|
234
|
-
pool_dict.pop('tls_container_data', None)
|
235
|
-
if 'ca_tls_container_ref' in pool_dict:
|
236
|
-
pool_dict['ca_tls_certificate_id'] = pool_dict.pop(
|
237
|
-
'ca_tls_container_ref')
|
238
|
-
pool_dict.pop('ca_tls_container_data', None)
|
239
|
-
if 'crl_container_ref' in pool_dict:
|
240
|
-
pool_dict['crl_container_id'] = pool_dict.pop('crl_container_ref')
|
241
|
-
pool_dict.pop('crl_container_data', None)
|
242
|
-
|
243
|
-
payload = {consts.POOL_ID: pool_id,
|
244
|
-
consts.POOL_UPDATES: pool_dict}
|
245
|
-
self.client.cast({}, 'update_pool', **payload)
|
246
|
-
|
247
|
-
# Member
|
248
|
-
def member_create(self, member):
|
249
|
-
pool_id = member.pool_id
|
250
|
-
db_pool = self.repositories.pool.get(db_apis.get_session(),
|
251
|
-
id=pool_id)
|
252
|
-
self._validate_members(db_pool, [member])
|
253
|
-
|
254
|
-
payload = {consts.MEMBER_ID: member.member_id}
|
255
|
-
self.client.cast({}, 'create_member', **payload)
|
256
|
-
|
257
|
-
def member_delete(self, member):
|
258
|
-
member_id = member.member_id
|
259
|
-
payload = {consts.MEMBER_ID: member_id}
|
260
|
-
self.client.cast({}, 'delete_member', **payload)
|
261
|
-
|
262
|
-
def member_update(self, old_member, new_member):
|
263
|
-
member_dict = new_member.to_dict()
|
264
|
-
if 'admin_state_up' in member_dict:
|
265
|
-
member_dict['enabled'] = member_dict.pop('admin_state_up')
|
266
|
-
member_id = member_dict.pop('member_id')
|
267
|
-
|
268
|
-
payload = {consts.MEMBER_ID: member_id,
|
269
|
-
consts.MEMBER_UPDATES: member_dict}
|
270
|
-
self.client.cast({}, 'update_member', **payload)
|
271
|
-
|
272
|
-
def member_batch_update(self, pool_id, members):
|
273
|
-
# The DB should not have updated yet, so we can still use the pool
|
274
|
-
db_pool = self.repositories.pool.get(db_apis.get_session(), id=pool_id)
|
275
|
-
|
276
|
-
self._validate_members(db_pool, members)
|
277
|
-
|
278
|
-
old_members = db_pool.members
|
279
|
-
|
280
|
-
old_member_ids = [m.id for m in old_members]
|
281
|
-
# The driver will always pass objects with IDs.
|
282
|
-
new_member_ids = [m.member_id for m in members]
|
283
|
-
|
284
|
-
# Find members that are brand new or updated
|
285
|
-
new_members = []
|
286
|
-
updated_members = []
|
287
|
-
for m in members:
|
288
|
-
if m.member_id not in old_member_ids:
|
289
|
-
new_members.append(m)
|
290
|
-
else:
|
291
|
-
member_dict = m.to_dict(render_unsets=False)
|
292
|
-
member_dict['id'] = member_dict.pop('member_id')
|
293
|
-
if 'address' in member_dict:
|
294
|
-
member_dict['ip_address'] = member_dict.pop('address')
|
295
|
-
if 'admin_state_up' in member_dict:
|
296
|
-
member_dict['enabled'] = member_dict.pop('admin_state_up')
|
297
|
-
updated_members.append(member_dict)
|
298
|
-
|
299
|
-
# Find members that are deleted
|
300
|
-
deleted_members = []
|
301
|
-
for m in old_members:
|
302
|
-
if m.id not in new_member_ids:
|
303
|
-
deleted_members.append(m)
|
304
|
-
|
305
|
-
payload = {'old_member_ids': [m.id for m in deleted_members],
|
306
|
-
'new_member_ids': [m.member_id for m in new_members],
|
307
|
-
'updated_members': updated_members}
|
308
|
-
self.client.cast({}, 'batch_update_members', **payload)
|
309
|
-
|
310
|
-
def _validate_members(self, db_pool, members):
|
311
|
-
if db_pool.protocol in consts.LVS_PROTOCOLS:
|
312
|
-
# For SCTP/UDP LBs, check that we are not mixing IPv4 and IPv6
|
313
|
-
for member in members:
|
314
|
-
member_is_ipv6 = utils.is_ipv6(member.address)
|
315
|
-
|
316
|
-
for listener in db_pool.listeners:
|
317
|
-
lb = listener.load_balancer
|
318
|
-
vip_is_ipv6 = utils.is_ipv6(lb.vip.ip_address)
|
319
|
-
|
320
|
-
if member_is_ipv6 != vip_is_ipv6:
|
321
|
-
msg = ("This provider doesn't support mixing IPv4 and "
|
322
|
-
"IPv6 addresses for its VIP and members in {} "
|
323
|
-
"load balancers.".format(db_pool.protocol))
|
324
|
-
raise exceptions.UnsupportedOptionError(
|
325
|
-
user_fault_string=msg,
|
326
|
-
operator_fault_string=msg)
|
327
|
-
|
328
|
-
# Health Monitor
|
329
|
-
def health_monitor_create(self, healthmonitor):
|
330
|
-
payload = {consts.HEALTH_MONITOR_ID: healthmonitor.healthmonitor_id}
|
331
|
-
self.client.cast({}, 'create_health_monitor', **payload)
|
332
|
-
|
333
|
-
def health_monitor_delete(self, healthmonitor):
|
334
|
-
healthmonitor_id = healthmonitor.healthmonitor_id
|
335
|
-
payload = {consts.HEALTH_MONITOR_ID: healthmonitor_id}
|
336
|
-
self.client.cast({}, 'delete_health_monitor', **payload)
|
337
|
-
|
338
|
-
def health_monitor_update(self, old_healthmonitor, new_healthmonitor):
|
339
|
-
healthmon_dict = new_healthmonitor.to_dict()
|
340
|
-
if 'admin_state_up' in healthmon_dict:
|
341
|
-
healthmon_dict['enabled'] = healthmon_dict.pop('admin_state_up')
|
342
|
-
if 'max_retries_down' in healthmon_dict:
|
343
|
-
healthmon_dict['fall_threshold'] = healthmon_dict.pop(
|
344
|
-
'max_retries_down')
|
345
|
-
if 'max_retries' in healthmon_dict:
|
346
|
-
healthmon_dict['rise_threshold'] = healthmon_dict.pop(
|
347
|
-
'max_retries')
|
348
|
-
healthmon_id = healthmon_dict.pop('healthmonitor_id')
|
349
|
-
|
350
|
-
payload = {consts.HEALTH_MONITOR_ID: healthmon_id,
|
351
|
-
consts.HEALTH_MONITOR_UPDATES: healthmon_dict}
|
352
|
-
self.client.cast({}, 'update_health_monitor', **payload)
|
353
|
-
|
354
|
-
# L7 Policy
|
355
|
-
def l7policy_create(self, l7policy):
|
356
|
-
db_listener = self.repositories.listener.get(db_apis.get_session(),
|
357
|
-
id=l7policy.listener_id)
|
358
|
-
if db_listener.protocol not in VALID_L7POLICY_LISTENER_PROTOCOLS:
|
359
|
-
msg = ('%s protocol listeners do not support L7 policies' % (
|
360
|
-
db_listener.protocol))
|
361
|
-
raise exceptions.UnsupportedOptionError(
|
362
|
-
user_fault_string=msg,
|
363
|
-
operator_fault_string=msg)
|
364
|
-
payload = {consts.L7POLICY_ID: l7policy.l7policy_id}
|
365
|
-
self.client.cast({}, 'create_l7policy', **payload)
|
366
|
-
|
367
|
-
def l7policy_delete(self, l7policy):
|
368
|
-
l7policy_id = l7policy.l7policy_id
|
369
|
-
payload = {consts.L7POLICY_ID: l7policy_id}
|
370
|
-
self.client.cast({}, 'delete_l7policy', **payload)
|
371
|
-
|
372
|
-
def l7policy_update(self, old_l7policy, new_l7policy):
|
373
|
-
l7policy_dict = new_l7policy.to_dict()
|
374
|
-
if 'admin_state_up' in l7policy_dict:
|
375
|
-
l7policy_dict['enabled'] = l7policy_dict.pop('admin_state_up')
|
376
|
-
l7policy_id = l7policy_dict.pop('l7policy_id')
|
377
|
-
|
378
|
-
payload = {consts.L7POLICY_ID: l7policy_id,
|
379
|
-
consts.L7POLICY_UPDATES: l7policy_dict}
|
380
|
-
self.client.cast({}, 'update_l7policy', **payload)
|
381
|
-
|
382
|
-
# L7 Rule
|
383
|
-
def l7rule_create(self, l7rule):
|
384
|
-
payload = {consts.L7RULE_ID: l7rule.l7rule_id}
|
385
|
-
self.client.cast({}, 'create_l7rule', **payload)
|
386
|
-
|
387
|
-
def l7rule_delete(self, l7rule):
    """Cast a delete_l7rule request to the controller worker.

    :param l7rule: The L7 rule object to delete.
    """
    self.client.cast({}, 'delete_l7rule',
                     **{consts.L7RULE_ID: l7rule.l7rule_id})
|
391
|
-
|
392
|
-
def l7rule_update(self, old_l7rule, new_l7rule):
    """Cast an update_l7rule request to the controller worker.

    :param old_l7rule: The baseline L7 rule object (unused here).
    :param new_l7rule: The L7 rule object carrying the new values.
    """
    updates = new_l7rule.to_dict()
    # The worker expects the legacy 'enabled' key rather than
    # 'admin_state_up'.
    try:
        updates['enabled'] = updates.pop('admin_state_up')
    except KeyError:
        pass
    rule_id = updates.pop('l7rule_id')
    self.client.cast({}, 'update_l7rule',
                     **{consts.L7RULE_ID: rule_id,
                        consts.L7RULE_UPDATES: updates})
|
401
|
-
|
402
|
-
# Flavor
|
403
|
-
def get_supported_flavor_metadata(self):
    """Returns the valid flavor metadata keys and descriptions.

    This extracts the valid flavor metadata keys and descriptions
    from the JSON validation schema and returns it as a dictionary.

    :return: Dictionary of flavor metadata keys and descriptions.
    :raises DriverError: An unexpected error occurred.
    """
    try:
        properties = flavor_schema.SUPPORTED_FLAVOR_SCHEMA['properties']
        metadata = {}
        for key, value in properties.items():
            metadata[key] = value.get('description', '')
        return metadata
    except Exception as e:
        # Same text for both audiences; the schema is internal config.
        fault = ('Failed to get the supported flavor '
                 'metadata due to: {}'.format(str(e)))
        raise exceptions.DriverError(user_fault_string=fault,
                                     operator_fault_string=fault)
|
421
|
-
|
422
|
-
def validate_flavor(self, flavor_dict):
    """Validates flavor profile data.

    This will validate a flavor profile dataset against the flavor
    settings the amphora driver supports.

    :param flavor_dict: The flavor dictionary to validate.
    :type flavor_dict: dict
    :return: None
    :raises DriverError: An unexpected error occurred.
    :raises UnsupportedOptionError: If the driver does not support
                                    one of the flavor settings.
    :raises NotFound: If the referenced amphora image tag cannot be
                      resolved to an image.
    """
    # Validate against the JSON schema first: a schema violation is a
    # user error, anything else is unexpected.
    try:
        validate(flavor_dict, flavor_schema.SUPPORTED_FLAVOR_SCHEMA)
    except js_exceptions.ValidationError as e:
        error_object = ''
        if e.relative_path:
            error_object = '{} '.format(e.relative_path[0])
        # Chain the original exception (PEP 3134) so the schema error
        # is preserved as the root cause.
        raise exceptions.UnsupportedOptionError(
            user_fault_string='{0}{1}'.format(error_object, e.message),
            operator_fault_string=str(e)) from e
    except Exception as e:
        raise exceptions.DriverError(
            user_fault_string='Failed to validate the flavor metadata '
                              'due to: {}'.format(str(e)),
            operator_fault_string='Failed to validate the flavor metadata '
                                  'due to: {}'.format(str(e))) from e
    compute_flavor = flavor_dict.get(consts.COMPUTE_FLAVOR, None)
    if compute_flavor:
        compute_driver = stevedore_driver.DriverManager(
            namespace='octavia.compute.drivers',
            name=CONF.controller_worker.compute_driver,
            invoke_on_load=True
        ).driver

        # TODO(johnsom) Fix this to raise a NotFound error
        # when the octavia-lib supports it.
        compute_driver.validate_flavor(compute_flavor)

    amp_image_tag = flavor_dict.get(consts.AMP_IMAGE_TAG, None)
    if amp_image_tag:
        image_driver = stevedore_driver.DriverManager(
            namespace='octavia.image.drivers',
            name=CONF.controller_worker.image_driver,
            invoke_on_load=True
        ).driver

        try:
            image_driver.get_image_id_by_tag(
                amp_image_tag, CONF.controller_worker.amp_image_owner_id)
        except Exception as e:
            # Chain so the image driver's failure is not lost.
            raise exceptions.NotFound(
                user_fault_string='Failed to find an image with tag {} '
                                  'due to: {}'.format(
                                      amp_image_tag, str(e)),
                operator_fault_string='Failed to find an image with tag '
                                      '{} due to: {}'.format(
                                          amp_image_tag, str(e))) from e
|
481
|
-
|
482
|
-
# Availability Zone
|
483
|
-
def get_supported_availability_zone_metadata(self):
    """Returns the valid availability zone metadata keys and descriptions.

    This extracts the valid availability zone metadata keys and
    descriptions from the JSON validation schema and returns it as a
    dictionary.

    :return: Dictionary of availability zone metadata keys and descriptions
    :raises DriverError: An unexpected error occurred.
    """
    try:
        schema = availability_zone_schema.SUPPORTED_AVAILABILITY_ZONE_SCHEMA
        metadata = {}
        for key, value in schema['properties'].items():
            metadata[key] = value.get('description', '')
        return metadata
    except Exception as e:
        # Same text for both audiences; the schema is internal config.
        fault = ('Failed to get the supported availability '
                 'zone metadata due to: {}'.format(str(e)))
        raise exceptions.DriverError(user_fault_string=fault,
                                     operator_fault_string=fault)
|
505
|
-
|
506
|
-
def validate_availability_zone(self, availability_zone_dict):
    """Validates availability zone profile data.

    This will validate an availability zone profile dataset against the
    availability zone settings the amphora driver supports.

    :param availability_zone_dict: The availability zone dict to validate.
    :type availability_zone_dict: dict
    :return: None
    :raises DriverError: An unexpected error occurred.
    :raises UnsupportedOptionError: If the driver does not support
                                    one of the availability zone settings.
    """
    # Validate against the JSON schema first: a schema violation is a
    # user error, anything else is unexpected.
    try:
        validate(
            availability_zone_dict,
            availability_zone_schema.SUPPORTED_AVAILABILITY_ZONE_SCHEMA)
    except js_exceptions.ValidationError as e:
        error_object = ''
        if e.relative_path:
            error_object = '{} '.format(e.relative_path[0])
        # Chain the original exception (PEP 3134) so the schema error
        # is preserved as the root cause.
        raise exceptions.UnsupportedOptionError(
            user_fault_string='{0}{1}'.format(error_object, e.message),
            operator_fault_string=str(e)) from e
    except Exception as e:
        raise exceptions.DriverError(
            user_fault_string='Failed to validate the availability zone '
                              'metadata due to: {}'.format(str(e)),
            operator_fault_string='Failed to validate the availability '
                                  'zone metadata due to: {}'.format(str(e))
        ) from e
    compute_zone = availability_zone_dict.get(consts.COMPUTE_ZONE, None)
    if compute_zone:
        compute_driver = stevedore_driver.DriverManager(
            namespace='octavia.compute.drivers',
            name=CONF.controller_worker.compute_driver,
            invoke_on_load=True
        ).driver

        # TODO(johnsom) Fix this to raise a NotFound error
        # when the octavia-lib supports it.
        compute_driver.validate_availability_zone(compute_zone)
|
@@ -1,11 +0,0 @@
|
|
1
|
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
2
|
-
# not use this file except in compliance with the License. You may obtain
|
3
|
-
# a copy of the License at
|
4
|
-
#
|
5
|
-
# http://www.apache.org/licenses/LICENSE-2.0
|
6
|
-
#
|
7
|
-
# Unless required by applicable law or agreed to in writing, software
|
8
|
-
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
9
|
-
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
10
|
-
# License for the specific language governing permissions and limitations
|
11
|
-
# under the License.
|
@@ -1,64 +0,0 @@
|
|
1
|
-
# Copyright 2014 Rackspace
|
2
|
-
#
|
3
|
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
4
|
-
# not use this file except in compliance with the License. You may obtain
|
5
|
-
# a copy of the License at
|
6
|
-
#
|
7
|
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
-
#
|
9
|
-
# Unless required by applicable law or agreed to in writing, software
|
10
|
-
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
11
|
-
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
12
|
-
# License for the specific language governing permissions and limitations
|
13
|
-
# under the License.
|
14
|
-
|
15
|
-
import cotyledon
|
16
|
-
from oslo_log import log as logging
|
17
|
-
import oslo_messaging as messaging
|
18
|
-
from oslo_messaging.rpc import dispatcher
|
19
|
-
|
20
|
-
from octavia.common import rpc
|
21
|
-
from octavia.controller.queue.v1 import endpoints
|
22
|
-
|
23
|
-
LOG = logging.getLogger(__name__)
|
24
|
-
|
25
|
-
|
26
|
-
class ConsumerService(cotyledon.Service):
    """Cotyledon worker that listens for Octavia v1 RPC messages."""

    def __init__(self, worker_id, conf):
        super().__init__(worker_id)
        self.conf = conf
        self.topic = conf.oslo_messaging.topic
        self.server = conf.host
        self.endpoints = []
        self.access_policy = dispatcher.DefaultRPCAccessPolicy
        self.message_listener = None

    def run(self):
        """Create the RPC server and start consuming messages."""
        LOG.info('Starting consumer...')
        self.endpoints = [endpoints.Endpoints()]
        rpc_target = messaging.Target(topic=self.topic,
                                      server=self.server,
                                      fanout=False)
        self.message_listener = rpc.get_server(
            rpc_target, self.endpoints, executor='threading',
            access_policy=self.access_policy)
        self.message_listener.start()

    def terminate(self):
        """Stop the listener, drain in-flight messages, then shut down."""
        listener = self.message_listener
        if listener:
            LOG.info('Stopping consumer...')
            listener.stop()

            LOG.info('Consumer successfully stopped. Waiting for final '
                     'messages to be processed...')
            listener.wait()
        if self.endpoints:
            LOG.info('Shutting down endpoint worker executors...')
            for endpoint in self.endpoints:
                try:
                    endpoint.worker.executor.shutdown()
                except AttributeError:
                    # Endpoint has no worker executor; nothing to shut down.
                    pass
        super().terminate()
|