octavia 13.0.0__py3-none-any.whl → 14.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- octavia/amphorae/backends/agent/api_server/lvs_listener_base.py +1 -1
- octavia/amphorae/backends/agent/api_server/osutils.py +5 -5
- octavia/amphorae/backends/agent/api_server/plug.py +3 -2
- octavia/amphorae/backends/agent/api_server/rules_schema.py +52 -0
- octavia/amphorae/backends/agent/api_server/server.py +28 -1
- octavia/amphorae/backends/utils/interface.py +45 -6
- octavia/amphorae/backends/utils/interface_file.py +9 -6
- octavia/amphorae/backends/utils/nftable_utils.py +125 -0
- octavia/amphorae/drivers/driver_base.py +27 -0
- octavia/amphorae/drivers/haproxy/rest_api_driver.py +42 -10
- octavia/amphorae/drivers/health/heartbeat_udp.py +2 -2
- octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py +2 -1
- octavia/amphorae/drivers/noop_driver/driver.py +25 -0
- octavia/api/app.py +3 -0
- octavia/api/common/pagination.py +2 -2
- octavia/api/drivers/amphora_driver/flavor_schema.py +6 -1
- octavia/api/root_controller.py +4 -1
- octavia/api/v2/controllers/health_monitor.py +0 -1
- octavia/api/v2/controllers/l7policy.py +0 -1
- octavia/api/v2/controllers/l7rule.py +0 -1
- octavia/api/v2/controllers/listener.py +0 -1
- octavia/api/v2/controllers/load_balancer.py +13 -7
- octavia/api/v2/controllers/member.py +6 -3
- octavia/api/v2/controllers/pool.py +6 -7
- octavia/api/v2/types/load_balancer.py +5 -1
- octavia/api/v2/types/pool.py +1 -1
- octavia/certificates/common/pkcs12.py +9 -9
- octavia/certificates/manager/barbican.py +24 -16
- octavia/certificates/manager/castellan_mgr.py +12 -7
- octavia/certificates/manager/local.py +4 -4
- octavia/certificates/manager/noop.py +106 -0
- octavia/cmd/driver_agent.py +1 -1
- octavia/cmd/health_checker.py +0 -4
- octavia/cmd/health_manager.py +1 -5
- octavia/cmd/house_keeping.py +1 -1
- octavia/cmd/interface.py +0 -4
- octavia/cmd/octavia_worker.py +0 -4
- octavia/cmd/prometheus_proxy.py +0 -5
- octavia/cmd/status.py +0 -6
- octavia/common/base_taskflow.py +1 -1
- octavia/common/clients.py +15 -3
- octavia/common/config.py +24 -6
- octavia/common/constants.py +34 -0
- octavia/common/data_models.py +3 -1
- octavia/common/exceptions.py +11 -0
- octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 +7 -5
- octavia/common/keystone.py +7 -7
- octavia/common/tls_utils/cert_parser.py +24 -10
- octavia/common/utils.py +6 -0
- octavia/common/validate.py +2 -2
- octavia/compute/drivers/nova_driver.py +23 -5
- octavia/controller/worker/task_utils.py +28 -6
- octavia/controller/worker/v2/controller_worker.py +49 -15
- octavia/controller/worker/v2/flows/amphora_flows.py +120 -21
- octavia/controller/worker/v2/flows/flow_utils.py +15 -13
- octavia/controller/worker/v2/flows/listener_flows.py +95 -5
- octavia/controller/worker/v2/flows/load_balancer_flows.py +74 -30
- octavia/controller/worker/v2/taskflow_jobboard_driver.py +17 -1
- octavia/controller/worker/v2/tasks/amphora_driver_tasks.py +145 -24
- octavia/controller/worker/v2/tasks/compute_tasks.py +1 -1
- octavia/controller/worker/v2/tasks/database_tasks.py +72 -41
- octavia/controller/worker/v2/tasks/lifecycle_tasks.py +97 -41
- octavia/controller/worker/v2/tasks/network_tasks.py +57 -60
- octavia/controller/worker/v2/tasks/shim_tasks.py +28 -0
- octavia/db/migration/alembic_migrations/versions/55874a4ceed6_add_l7policy_action_redirect_prefix.py +1 -1
- octavia/db/migration/alembic_migrations/versions/5a3ee5472c31_add_cert_expiration__infor_in_amphora_table.py +1 -1
- octavia/db/migration/alembic_migrations/versions/6742ca1b27c2_add_l7policy_redirect_http_code.py +1 -1
- octavia/db/migration/alembic_migrations/versions/db2a73e82626_add_vnic_type_for_vip.py +36 -0
- octavia/db/models.py +1 -0
- octavia/db/prepare.py +1 -1
- octavia/db/repositories.py +53 -34
- octavia/distributor/drivers/driver_base.py +1 -1
- octavia/network/base.py +3 -16
- octavia/network/data_models.py +4 -1
- octavia/network/drivers/neutron/allowed_address_pairs.py +27 -26
- octavia/network/drivers/noop_driver/driver.py +10 -23
- octavia/tests/common/sample_certs.py +115 -0
- octavia/tests/common/sample_haproxy_prometheus +1 -1
- octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py +37 -0
- octavia/tests/functional/api/test_healthcheck.py +2 -2
- octavia/tests/functional/api/v2/base.py +1 -1
- octavia/tests/functional/api/v2/test_listener.py +45 -0
- octavia/tests/functional/api/v2/test_load_balancer.py +17 -0
- octavia/tests/functional/db/base.py +9 -0
- octavia/tests/functional/db/test_models.py +2 -1
- octavia/tests/functional/db/test_repositories.py +55 -99
- octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py +4 -2
- octavia/tests/unit/amphorae/backends/utils/test_interface.py +201 -1
- octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py +1 -1
- octavia/tests/unit/amphorae/backends/utils/test_nftable_utils.py +194 -0
- octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver.py +27 -5
- octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py +15 -2
- octavia/tests/unit/amphorae/drivers/keepalived/test_vrrp_rest_driver.py +17 -0
- octavia/tests/unit/amphorae/drivers/noop_driver/test_driver.py +2 -1
- octavia/tests/unit/api/v2/types/test_pool.py +71 -0
- octavia/tests/unit/certificates/manager/test_barbican.py +3 -3
- octavia/tests/unit/certificates/manager/test_noop.py +53 -0
- octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py +16 -17
- octavia/tests/unit/common/sample_configs/sample_configs_combined.py +5 -3
- octavia/tests/unit/common/test_config.py +35 -0
- octavia/tests/unit/common/test_keystone.py +32 -0
- octavia/tests/unit/common/test_utils.py +39 -0
- octavia/tests/unit/compute/drivers/test_nova_driver.py +22 -0
- octavia/tests/unit/controller/worker/test_task_utils.py +58 -2
- octavia/tests/unit/controller/worker/v2/flows/test_amphora_flows.py +28 -5
- octavia/tests/unit/controller/worker/v2/flows/test_listener_flows.py +64 -16
- octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py +49 -9
- octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py +265 -17
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py +101 -1
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py +19 -19
- octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py +105 -42
- octavia/tests/unit/controller/worker/v2/tasks/test_shim_tasks.py +33 -0
- octavia/tests/unit/controller/worker/v2/test_controller_worker.py +85 -42
- octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py +48 -51
- octavia/tests/unit/network/drivers/neutron/test_utils.py +2 -0
- octavia/tests/unit/network/drivers/noop_driver/test_driver.py +0 -7
- {octavia-13.0.0.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/README.rst +6 -1
- {octavia-13.0.0.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/diskimage-create.sh +10 -4
- {octavia-13.0.0.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/requirements.txt +0 -2
- {octavia-13.0.0.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/tox.ini +30 -13
- {octavia-13.0.0.dist-info → octavia-14.0.0.dist-info}/AUTHORS +5 -0
- {octavia-13.0.0.dist-info → octavia-14.0.0.dist-info}/METADATA +6 -6
- {octavia-13.0.0.dist-info → octavia-14.0.0.dist-info}/RECORD +134 -126
- {octavia-13.0.0.dist-info → octavia-14.0.0.dist-info}/entry_points.txt +1 -1
- octavia-14.0.0.dist-info/pbr.json +1 -0
- octavia-13.0.0.dist-info/pbr.json +0 -1
- {octavia-13.0.0.data → octavia-14.0.0.data}/data/share/octavia/LICENSE +0 -0
- {octavia-13.0.0.data → octavia-14.0.0.data}/data/share/octavia/README.rst +0 -0
- {octavia-13.0.0.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/image-tests.sh +0 -0
- {octavia-13.0.0.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/test-requirements.txt +0 -0
- {octavia-13.0.0.data → octavia-14.0.0.data}/data/share/octavia/diskimage-create/version.txt +0 -0
- {octavia-13.0.0.data → octavia-14.0.0.data}/scripts/octavia-wsgi +0 -0
- {octavia-13.0.0.dist-info → octavia-14.0.0.dist-info}/LICENSE +0 -0
- {octavia-13.0.0.dist-info → octavia-14.0.0.dist-info}/WHEEL +0 -0
- {octavia-13.0.0.dist-info → octavia-14.0.0.dist-info}/top_level.txt +0 -0
octavia/amphorae/backends/agent/api_server/lvs_listener_base.py
@@ -85,6 +85,6 @@ class LvsListenerApiServerBase(object, metaclass=abc.ABCMeta):
         :param listener_id: The id of the listener

         :returns: HTTP response with status code.
-        :raises Exception: If
+        :raises Exception: If unsupported initial system of amphora.

         """
octavia/amphorae/backends/agent/api_server/osutils.py
@@ -38,8 +38,7 @@ class BaseOS(object):
     @classmethod
     def _get_subclasses(cls):
         for subclass in cls.__subclasses__():
-            for sc in subclass._get_subclasses():
-                yield sc
+            yield from subclass._get_subclasses()
             yield subclass

     @classmethod
@@ -65,14 +64,15 @@ class BaseOS(object):
         interface.write()

     def write_vip_interface_file(self, interface, vips, mtu, vrrp_info,
-                                 fixed_ips=None):
+                                 fixed_ips=None, is_sriov=False):
         vip_interface = interface_file.VIPInterfaceFile(
             name=interface,
             mtu=mtu,
             vips=vips,
             vrrp_info=vrrp_info,
             fixed_ips=fixed_ips,
-            topology=CONF.controller_worker.loadbalancer_topology)
+            topology=CONF.controller_worker.loadbalancer_topology,
+            is_sriov=is_sriov)
         vip_interface.write()

     def write_port_interface_file(self, interface, fixed_ips, mtu):
@@ -116,7 +116,7 @@ class RH(BaseOS):

     @classmethod
     def is_os_name(cls, os_name):
-        return os_name in ['fedora', 'rhel']
+        return os_name in ['fedora', 'rhel', 'rocky']

     def cmd_get_version_of_installed_package(self, package_name):
         name = self._map_package_name(package_name)
octavia/amphorae/backends/agent/api_server/plug.py
@@ -78,7 +78,7 @@ class Plug(object):

     def plug_vip(self, vip, subnet_cidr, gateway,
                  mac_address, mtu=None, vrrp_ip=None, host_routes=(),
-                 additional_vips=()):
+                 additional_vips=(), is_sriov=False):
         vips = [{
             'ip_address': vip,
             'subnet_cidr': subnet_cidr,
@@ -118,7 +118,8 @@ class Plug(object):
             interface=primary_interface,
             vips=rendered_vips,
             mtu=mtu,
-            vrrp_info=vrrp_info)
+            vrrp_info=vrrp_info,
+            is_sriov=is_sriov)

         # Update the list of interfaces to add to the namespace
         # This is used in the amphora reboot case to re-establish the namespace
octavia/amphorae/backends/agent/api_server/rules_schema.py
@@ -0,0 +1,52 @@
+# Copyright 2024 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from octavia_lib.common import constants as lib_consts
+
+from octavia.common import constants as consts
+
+# This is a JSON schema validation dictionary
+# https://json-schema.org/latest/json-schema-validation.html
+
+SUPPORTED_RULES_SCHEMA = {
+    '$schema': 'http://json-schema.org/draft-07/schema#',
+    'title': 'Octavia Amphora NFTables Rules Schema',
+    'description': 'This schema is used to validate an nftables rules JSON '
+                   'document sent from a controller.',
+    'type': 'array',
+    'items': {
+        'additionalProperties': False,
+        'properties': {
+            consts.PROTOCOL: {
+                'type': 'string',
+                'description': 'The protocol for the rule. One of: '
+                               'TCP, UDP, VRRP, SCTP',
+                'enum': list((lib_consts.PROTOCOL_SCTP,
+                              lib_consts.PROTOCOL_TCP,
+                              lib_consts.PROTOCOL_UDP,
+                              consts.VRRP))
+            },
+            consts.CIDR: {
+                'type': ['string', 'null'],
+                'description': 'The allowed source CIDR.'
+            },
+            consts.PORT: {
+                'type': 'number',
+                'description': 'The protocol port number.',
+                'minimum': 1,
+                'maximum': 65535
+            }
+        },
+        'required': [consts.PROTOCOL, consts.CIDR, consts.PORT]
+    }
+}
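For orientation, the amphora agent validates incoming rule documents against this schema with jsonschema.validate() (see the server.py hunks below). A minimal standalone sketch of a document that should pass validation, assuming octavia 14.0.0 and jsonschema are installed; the CIDR and port values are purely illustrative:

from jsonschema import validate

from octavia_lib.common import constants as lib_consts

from octavia.amphorae.backends.agent.api_server import rules_schema
from octavia.common import constants as consts

# Illustrative allow rules: one TCP listener rule plus a VRRP rule for
# active/standby topologies (the port value is not used for VRRP matching).
rules = [
    {consts.PROTOCOL: lib_consts.PROTOCOL_TCP,
     consts.CIDR: '192.0.2.0/24',
     consts.PORT: 443},
    {consts.PROTOCOL: consts.VRRP,
     consts.CIDR: None,
     consts.PORT: 112},
]

# Raises jsonschema.exceptions.ValidationError if the document is malformed.
validate(rules, rules_schema.SUPPORTED_RULES_SCHEMA)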
octavia/amphorae/backends/agent/api_server/server.py
@@ -16,6 +16,7 @@ import os
 import stat

 import flask
+from jsonschema import validate
 from oslo_config import cfg
 from oslo_log import log as logging
 import webob
@@ -29,7 +30,9 @@ from octavia.amphorae.backends.agent.api_server import keepalivedlvs
 from octavia.amphorae.backends.agent.api_server import loadbalancer
 from octavia.amphorae.backends.agent.api_server import osutils
 from octavia.amphorae.backends.agent.api_server import plug
+from octavia.amphorae.backends.agent.api_server import rules_schema
 from octavia.amphorae.backends.agent.api_server import util
+from octavia.amphorae.backends.utils import nftable_utils
 from octavia.common import constants as consts


@@ -137,6 +140,9 @@ class Server(object):
         self.app.add_url_rule(rule=PATH_PREFIX + '/interface/<ip_addr>',
                               view_func=self.get_interface,
                               methods=['GET'])
+        self.app.add_url_rule(rule=PATH_PREFIX + '/interface/<ip_addr>/rules',
+                              view_func=self.set_interface_rules,
+                              methods=['PUT'])

     def upload_haproxy_config(self, amphora_id, lb_id):
         return self._loadbalancer.upload_haproxy_config(amphora_id, lb_id)
@@ -203,7 +209,8 @@ class Server(object):
             net_info.get('mtu'),
             net_info.get('vrrp_ip'),
             net_info.get('host_routes', ()),
-            net_info.get('additional_vips', ()))
+            net_info.get('additional_vips', ()),
+            net_info.get('is_sriov', False))

     def plug_network(self):
         try:
@@ -256,3 +263,23 @@ class Server(object):

     def version_discovery(self):
         return webob.Response(json={'api_version': api_server.VERSION})
+
+    def set_interface_rules(self, ip_addr):
+        interface_webob = self._amphora_info.get_interface(ip_addr)
+
+        if interface_webob.status_code != 200:
+            return interface_webob
+        interface = interface_webob.json['interface']
+
+        try:
+            rules_info = flask.request.get_json()
+            validate(rules_info, rules_schema.SUPPORTED_RULES_SCHEMA)
+        except Exception as e:
+            raise exceptions.BadRequest(
+                description='Invalid rules information') from e
+
+        nftable_utils.write_nftable_vip_rules_file(interface, rules_info)
+
+        nftable_utils.load_nftables_file()
+
+        return webob.Response(json={'message': 'OK'}, status=200)
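The new /interface/<ip_addr>/rules endpoint accepts a JSON list of allow rules matching the schema above. A rough sketch of an equivalent request using the requests library; the amphora address, the default agent port 9443, the 1.0 API prefix, and the disabled TLS verification are assumptions for illustration only (the real controller uses the two-way-TLS AmphoraAPIClient method added later in this diff):

import requests

from octavia_lib.common import constants as lib_consts

from octavia.common import constants as consts

amphora_mgmt_ip = '198.51.100.10'   # assumed amphora management address
vip_address = '203.0.113.5'         # assumed VIP interface address

rules = [{consts.PROTOCOL: lib_consts.PROTOCOL_TCP,
          consts.CIDR: None,
          consts.PORT: 443}]

# PUT the rules to the per-interface endpoint; the agent validates them,
# rewrites its nftables VIP rules file, and reloads it.
resp = requests.put(
    f'https://{amphora_mgmt_ip}:9443/1.0/interface/{vip_address}/rules',
    json=rules, verify=False, timeout=(5, 60))
resp.raise_for_status()   # the agent replies {'message': 'OK'} on success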
octavia/amphorae/backends/utils/interface.py
@@ -28,6 +28,7 @@ from pyroute2.netlink.rtnl import ifaddrmsg
 from pyroute2.netlink.rtnl import rt_proto

 from octavia.amphorae.backends.utils import interface_file
+from octavia.amphorae.backends.utils import nftable_utils
 from octavia.common import constants as consts
 from octavia.common import exceptions

@@ -175,9 +176,48 @@ class InterfaceController(object):
         ip_network = ipaddress.ip_network(address, strict=False)
         return ip_network.compressed

+    def _setup_nftables_chain(self, interface):
+        # TODO(johnsom) Move this to pyroute2 when the nftables library
+        # improves.
+
+        # Create the nftable
+        cmd = [consts.NFT_CMD, consts.NFT_ADD, 'table', consts.NFT_FAMILY,
+               consts.NFT_VIP_TABLE]
+        try:
+            subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+        except Exception as e:
+            if hasattr(e, 'output'):
+                LOG.error(e.output)
+            else:
+                LOG.error(e)
+            raise
+
+        # Create the chain with -310 priority to put it in front of the
+        # lvs-masquerade configured chain
+        cmd = [consts.NFT_CMD, consts.NFT_ADD, 'chain', consts.NFT_FAMILY,
+               consts.NFT_VIP_TABLE, consts.NFT_VIP_CHAIN,
+               '{', 'type', 'filter', 'hook', 'ingress', 'device',
+               interface.name, 'priority', consts.NFT_SRIOV_PRIORITY, ';',
+               'policy', 'drop', ';', '}']
+        try:
+            subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+        except Exception as e:
+            if hasattr(e, 'output'):
+                LOG.error(e.output)
+            else:
+                LOG.error(e)
+            raise
+
+        nftable_utils.write_nftable_vip_rules_file(interface.name, [])
+
+        nftable_utils.load_nftables_file()
+
     def up(self, interface):
         LOG.info("Setting interface %s up", interface.name)

+        if interface.is_sriov:
+            self._setup_nftables_chain(interface)
+
         with pyroute2.IPRoute() as ipr:
             idx = ipr.link_lookup(ifname=interface.name)[0]

@@ -227,7 +267,7 @@ class InterfaceController(object):
             if key in current_addresses:
                 current_addresses.remove(key)
             elif address.get(consts.OCTAVIA_OWNED, True):
-                # By default all
+                # By default all addresses are managed/owned by Octavia
                 address[consts.FAMILY] = self._family(
                     address[consts.ADDRESS])
                 LOG.debug("%s: Adding address %s", interface.name,
@@ -356,11 +396,10 @@ class InterfaceController(object):
                 **rule)

     def _scripts_up(self, interface, current_state):
-
-
-
-
-                subprocess.check_output(script[consts.COMMAND].split())
+        for script in interface.scripts[consts.IFACE_UP]:
+            LOG.debug("%s: Running command '%s'",
+                      interface.name, script[consts.COMMAND])
+            subprocess.check_output(script[consts.COMMAND].split())

     def down(self, interface):
         LOG.info("Setting interface %s down", interface.name)
octavia/amphorae/backends/utils/interface_file.py
@@ -25,9 +25,8 @@ CONF = cfg.CONF


 class InterfaceFile(object):
-    def __init__(self, name, if_type,
-                 mtu=None, addresses=None,
-                 routes=None, rules=None, scripts=None):
+    def __init__(self, name, if_type, mtu=None, addresses=None,
+                 routes=None, rules=None, scripts=None, is_sriov=False):
         self.name = name
         self.if_type = if_type
         self.mtu = mtu
@@ -38,6 +37,7 @@ class InterfaceFile(object):
             consts.IFACE_UP: [],
             consts.IFACE_DOWN: []
         }
+        self.is_sriov = is_sriov

     @classmethod
     def get_extensions(cls):
@@ -98,7 +98,8 @@ class InterfaceFile(object):
             consts.ADDRESSES: self.addresses,
             consts.ROUTES: self.routes,
             consts.RULES: self.rules,
-            consts.SCRIPTS: self.scripts
+            consts.SCRIPTS: self.scripts,
+            consts.IS_SRIOV: self.is_sriov
         }
         if self.mtu:
             interface[consts.MTU] = self.mtu
@@ -106,12 +107,14 @@


 class VIPInterfaceFile(InterfaceFile):
-    def __init__(self, name, mtu, vips, vrrp_info, fixed_ips, topology):
+    def __init__(self, name, mtu, vips, vrrp_info, fixed_ips, topology,
+                 is_sriov=False):

-        super().__init__(name, if_type=consts.VIP, mtu=mtu)
+        super().__init__(name, if_type=consts.VIP, mtu=mtu, is_sriov=is_sriov)

         has_ipv4 = any(vip['ip_version'] == 4 for vip in vips)
         has_ipv6 = any(vip['ip_version'] == 6 for vip in vips)
+
         if vrrp_info:
             self.addresses.append({
                 consts.ADDRESS: vrrp_info['ip'],
octavia/amphorae/backends/utils/nftable_utils.py
@@ -0,0 +1,125 @@
+# Copyright 2024 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import os
+import stat
+import subprocess
+
+from octavia_lib.common import constants as lib_consts
+from oslo_log import log as logging
+from webob import exc
+
+from octavia.amphorae.backends.utils import network_namespace
+from octavia.common import constants as consts
+from octavia.common import utils
+
+LOG = logging.getLogger(__name__)
+
+
+def write_nftable_vip_rules_file(interface_name, rules):
+    flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
+    # mode 00600
+    mode = stat.S_IRUSR | stat.S_IWUSR
+
+    # Create some strings shared on both code paths
+    table_string = f'table {consts.NFT_FAMILY} {consts.NFT_VIP_TABLE} {{\n'
+    chain_string = f' chain {consts.NFT_VIP_CHAIN} {{\n'
+    hook_string = (f' type filter hook ingress device {interface_name} '
+                   f'priority {consts.NFT_SRIOV_PRIORITY}; policy drop;\n')
+
+    # Allow ICMP destination unreachable for PMTUD
+    icmp_string = ' icmp type destination-unreachable accept\n'
+    # Allow the required neighbor solicitation/discovery PMTUD ICMPV6
+    icmpv6_string = (' icmpv6 type { nd-neighbor-solicit, '
+                     'nd-router-advert, nd-neighbor-advert, packet-too-big, '
+                     'destination-unreachable } accept\n')
+    # Allow DHCP responses
+    dhcp_string = ' udp sport 67 udp dport 68 accept\n'
+    dhcpv6_string = ' udp sport 547 udp dport 546 accept\n'
+
+    # Check if an existing rules file exists or we be need to create an
+    # "drop all" file with no rules except for VRRP. If it exists, we should
+    # not overwrite it here as it could be a reboot unless we were passed new
+    # rules.
+    if os.path.isfile(consts.NFT_VIP_RULES_FILE):
+        if not rules:
+            return
+        with os.fdopen(
+                os.open(consts.NFT_VIP_RULES_FILE, flags, mode), 'w') as file:
+            # Clear the existing rules in the kernel
+            # Note: The "nft -f" method is atomic, so clearing the rules will
+            # not leave the amphora exposed.
+            # Create and delete the table to not get errors if the table does
+            # not exist yet.
+            file.write(f'table {consts.NFT_FAMILY} {consts.NFT_VIP_TABLE} '
+                       '{}\n')
+            file.write(f'delete table {consts.NFT_FAMILY} '
+                       f'{consts.NFT_VIP_TABLE}\n')
+            file.write(table_string)
+            file.write(chain_string)
+            file.write(hook_string)
+            file.write(icmp_string)
+            file.write(icmpv6_string)
+            file.write(dhcp_string)
+            file.write(dhcpv6_string)
+            for rule in rules:
+                file.write(f' {_build_rule_cmd(rule)}\n')
+            file.write(' }\n')  # close the chain
+            file.write('}\n')  # close the table
+    else:  # No existing rules, create the "drop all" base rules
+        with os.fdopen(
+                os.open(consts.NFT_VIP_RULES_FILE, flags, mode), 'w') as file:
+            file.write(table_string)
+            file.write(chain_string)
+            file.write(hook_string)
+            file.write(icmp_string)
+            file.write(icmpv6_string)
+            file.write(dhcp_string)
+            file.write(dhcpv6_string)
+            file.write(' }\n')  # close the chain
+            file.write('}\n')  # close the table
+
+
+def _build_rule_cmd(rule):
+    prefix_saddr = ''
+    if rule[consts.CIDR] and rule[consts.CIDR] != '0.0.0.0/0':
+        cidr_ip_version = utils.ip_version(rule[consts.CIDR].split('/')[0])
+        if cidr_ip_version == 4:
+            prefix_saddr = f'ip saddr {rule[consts.CIDR]} '
+        elif cidr_ip_version == 6:
+            prefix_saddr = f'ip6 saddr {rule[consts.CIDR]} '
+        else:
+            raise exc.HTTPBadRequest(explanation='Unknown ip version')
+
+    if rule[consts.PROTOCOL] == lib_consts.PROTOCOL_SCTP:
+        return f'{prefix_saddr}sctp dport {rule[consts.PORT]} accept'
+    if rule[consts.PROTOCOL] == lib_consts.PROTOCOL_TCP:
+        return f'{prefix_saddr}tcp dport {rule[consts.PORT]} accept'
+    if rule[consts.PROTOCOL] == lib_consts.PROTOCOL_UDP:
+        return f'{prefix_saddr}udp dport {rule[consts.PORT]} accept'
+    if rule[consts.PROTOCOL] == consts.VRRP:
+        return f'{prefix_saddr}ip protocol 112 accept'
+    raise exc.HTTPBadRequest(explanation='Unknown protocol used in rules')
+
+
+def load_nftables_file():
+    cmd = [consts.NFT_CMD, '-o', '-f', consts.NFT_VIP_RULES_FILE]
+    try:
+        with network_namespace.NetworkNamespace(consts.AMPHORA_NAMESPACE):
+            subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+    except Exception as e:
+        if hasattr(e, 'output'):
+            LOG.error(e.output)
+        else:
+            LOG.error(e)
+        raise
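A short usage sketch of the two public helpers, mirroring how the agent calls them; it has to run as root inside an amphora because load_nftables_file() executes nft inside the amphora-haproxy namespace, and the interface name and rule below are illustrative assumptions:

from octavia_lib.common import constants as lib_consts

from octavia.amphorae.backends.utils import nftable_utils
from octavia.common import constants as consts

# If the rules file at consts.NFT_VIP_RULES_FILE already exists, it is
# rewritten with the base accepts (ICMP/ICMPv6, DHCP) plus the allow rules
# given here; on the very first call (no file yet) only the "drop all"
# base ruleset is written and the rules argument is ignored.
nftable_utils.write_nftable_vip_rules_file(
    'eth1',                                  # assumed VIP interface name
    [{consts.PROTOCOL: lib_consts.PROTOCOL_TCP,
      consts.CIDR: None,
      consts.PORT: 443}])

# Atomically (re)load the generated file via 'nft -o -f <rules file>'.
nftable_utils.load_nftables_file()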
octavia/amphorae/drivers/driver_base.py
@@ -14,6 +14,9 @@
 # under the License.

 import abc
+from typing import Optional
+
+from octavia.db import models as db_models


 class AmphoraLoadBalancerDriver(object, metaclass=abc.ABCMeta):
@@ -236,6 +239,30 @@ class AmphoraLoadBalancerDriver(object, metaclass=abc.ABCMeta):
         :type timeout_dict: dict
         """

+    @abc.abstractmethod
+    def check(self, amphora: db_models.Amphora,
+              timeout_dict: Optional[dict] = None):
+        """Check connectivity to the amphora.
+
+        :param amphora: The amphora to query.
+        :param timeout_dict: Dictionary of timeout values for calls to the
+                             amphora. May contain: req_conn_timeout,
+                             req_read_timeout, conn_max_retries,
+                             conn_retry_interval
+        :raises TimeOutException: The amphora didn't reply
+        """
+
+    @abc.abstractmethod
+    def set_interface_rules(self, amphora: db_models.Amphora, ip_address,
+                            rules):
+        """Sets interface firewall rules in the amphora
+
+        :param amphora: The amphora to query.
+        :param ip_address: The IP address assigned to the interface the rules
+                           will be applied on.
+        :param rules: The list of allow rules to apply.
+        """
+

 class VRRPDriverMixin(object, metaclass=abc.ABCMeta):
     """Abstract mixin class for VRRP support in loadbalancer amphorae
octavia/amphorae/drivers/haproxy/rest_api_driver.py
@@ -111,6 +111,11 @@ class HaproxyAmphoraLoadBalancerDriver(

         return api_version

+    def check(self, amphora: db_models.Amphora,
+              timeout_dict: Optional[dict] = None):
+        """Check connectivity to the amphora."""
+        self._populate_amphora_api_version(amphora, timeout_dict)
+
     def update_amphora_listeners(self, loadbalancer, amphora,
                                  timeout_dict=None):
         """Update the amphora with a new configuration.
@@ -313,7 +318,7 @@
             # We have other listeners, so just update is fine.
             # TODO(rm_work): This is a little inefficient since this duplicates
             # a lot of the detection logic that has already been done, but it
-            # is probably safer to
+            # is probably safer to reuse the existing code-path.
             self.update_amphora_listeners(listener.load_balancer, amphora)
         else:
             # Deleting the last listener, so really do the delete
@@ -335,7 +340,7 @@
     def finalize_amphora(self, amphora):
         pass

-    def _build_net_info(self, port, amphora, subnet, mtu=None):
+    def _build_net_info(self, port, amphora, subnet, mtu=None, sriov=False):
         # NOTE(blogan): using the vrrp port here because that
         # is what the allowed address pairs network driver sets
         # this particular port to. This does expose a bit of
@@ -354,7 +359,8 @@
                     'vrrp_ip': amphora[consts.VRRP_IP],
                     'mtu': mtu or port[consts.NETWORK][consts.MTU],
                     'host_routes': host_routes,
-                    'additional_vips': []}
+                    'additional_vips': [],
+                    'is_sriov': sriov}
         return net_info

     def post_vip_plug(self, amphora, load_balancer, amphorae_network_config,
@@ -365,9 +371,12 @@
         mtu = port[consts.NETWORK][consts.MTU]
         LOG.debug("Post-VIP-Plugging with vrrp_ip %s vrrp_port %s",
                   amphora.vrrp_ip, port[consts.ID])
+        sriov = False
+        if load_balancer.vip.vnic_type == consts.VNIC_TYPE_DIRECT:
+            sriov = True
         net_info = self._build_net_info(
             port, amphora.to_dict(),
-            vip_subnet.to_dict(recurse=True), mtu)
+            vip_subnet.to_dict(recurse=True), mtu, sriov)
         for add_vip in additional_vip_data:
             add_host_routes = [{'nexthop': hr.nexthop,
                                 'destination': hr.destination}
@@ -579,15 +588,33 @@
                              req_read_timeout, conn_max_retries,
                              conn_retry_interval
         :type timeout_dict: dict
-        :returns:
+        :returns: the interface name string if found.
+        :raises octavia.amphorae.drivers.haproxy.exceptions.NotFound:
+            No interface found on the amphora
+        :raises TimeOutException: The amphora didn't reply
+        """
+        self._populate_amphora_api_version(amphora, timeout_dict)
+        response_json = self.clients[amphora.api_version].get_interface(
+            amphora, ip_address, timeout_dict, log_error=False)
+        return response_json.get('interface', None)
+
+    def set_interface_rules(self, amphora: db_models.Amphora,
+                            ip_address, rules, timeout_dict=None):
+        """Sets interface firewall rules in the amphora
+
+        :param amphora: The amphora to query.
+        :param ip_address: The IP address assigned to the interface the rules
+                           will be applied on.
+        :param rules: The list of allow rules to apply.
         """
         try:
             self._populate_amphora_api_version(amphora, timeout_dict)
-
-                amphora, ip_address,
-
-
-
+            self.clients[amphora.api_version].set_interface_rules(
+                amphora, ip_address, rules, timeout_dict=timeout_dict)
+        except exc.NotFound as e:
+            LOG.debug('Amphora %s does not support the set_interface_rules '
+                      'API.', amphora.id)
+            raise driver_except.AmpDriverNotImplementedError() from e


 # Check a custom hostname
@@ -858,3 +885,8 @@ class AmphoraAPIClient1_0(AmphoraAPIClientBase):
     def update_agent_config(self, amp, agent_config, timeout_dict=None):
         r = self.put(amp, 'config', timeout_dict, data=agent_config)
         return exc.check_exception(r)
+
+    def set_interface_rules(self, amp, ip_address, rules, timeout_dict=None):
+        r = self.put(amp, f'interface/{ip_address}/rules', timeout_dict,
+                     json=rules)
+        return exc.check_exception(r)
octavia/amphorae/drivers/health/heartbeat_udp.py
@@ -490,7 +490,7 @@ class UpdateHealthDb:
             raw_pools = listener['pools']

             # normalize the pool IDs. Single process listener pools
-            # have the listener id appended with an ':'
+            # have the listener id appended with an ':' separator.
             # Old multi-process listener pools only have a pool ID.
             # This makes sure the keys are only pool IDs.
             pools = {(k + ' ')[:k.rfind(':')]: v for k, v in
@@ -511,7 +511,7 @@
             raw_pools = health['pools']

             # normalize the pool IDs. Single process listener pools
-            # have the listener id appended with an ':'
+            # have the listener id appended with an ':' separator.
             # Old multi-process listener pools only have a pool ID.
             # This makes sure the keys are only pool IDs.
             pools = {(k + ' ')[:k.rfind(':')]: v for k, v in raw_pools.items()}
octavia/amphorae/drivers/keepalived/vrrp_rest_driver.py
@@ -88,7 +88,8 @@ class KeepalivedAmphoraDriverMixin(driver_base.VRRPDriverMixin):

         LOG.info("Start amphora %s VRRP Service.", amphora.id)

-        self._populate_amphora_api_version(amphora)
+        self._populate_amphora_api_version(amphora,
+                                           timeout_dict=timeout_dict)
         self.clients[amphora.api_version].start_vrrp(amphora,
                                                      timeout_dict=timeout_dict)

octavia/amphorae/drivers/noop_driver/driver.py
@@ -11,10 +11,14 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+import random

 from oslo_log import log as logging

 from octavia.amphorae.drivers import driver_base
+from octavia.common import data_models
+from octavia.db import api as db_apis
+from octavia.db import repositories

 LOG = logging.getLogger(__name__)

@@ -34,6 +38,21 @@ class NoopManager(object):
         self.amphoraconfig[(listener.id, amphora_id)] = (
             listener, amphora_id, timeout_dict, "update_amp")

+        # Add some dummy stats to the DB when using noop driver
+        listener_stats_repo = repositories.ListenerStatisticsRepository()
+        stats_obj = data_models.ListenerStatistics(
+            listener_id=listener.id,
+            amphora_id=amphora.id,
+            bytes_in=random.randrange(1000000000),
+            bytes_out=random.randrange(1000000000),
+            active_connections=random.randrange(1000000000),
+            total_connections=random.randrange(1000000000),
+            request_errors=random.randrange(1000000000),
+            received_time=float(random.randrange(1000000000)),
+        )
+        listener_stats_repo.replace(session=db_apis.get_session(),
+                                    stats_obj=stats_obj)
+
     def update(self, loadbalancer):
         LOG.debug("Amphora %s no-op, update listener %s, vip %s",
                   self.__class__.__name__,
@@ -196,3 +215,9 @@ class NoopAmphoraLoadBalancerDriver(

     def reload_vrrp_service(self, loadbalancer):
         pass
+
+    def check(self, amphora, timeout_dict=None):
+        pass
+
+    def set_interface_rules(self, amphora, ip_address, rules):
+        pass
octavia/api/app.py
@@ -20,6 +20,7 @@ from oslo_log import log as logging
 from oslo_middleware import cors
 from oslo_middleware import http_proxy_to_wsgi
 from oslo_middleware import request_id
+from oslo_middleware import sizelimit
 from pecan import configuration as pecan_configuration
 from pecan import make_app as pecan_make_app

@@ -103,4 +104,6 @@ def _wrap_app(app):
         expose_headers=['X-Auth-Token', 'X-Openstack-Request-Id']
     )

+    app = sizelimit.RequestBodySizeLimiter(app, cfg.CONF)
+
     return app
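RequestBodySizeLimiter is the stock oslo.middleware size guard: requests whose body exceeds the [oslo_middleware] max_request_body_size option are rejected with HTTP 413 before they reach the Octavia API. A minimal standalone sketch (not Octavia code) of the same wrapping pattern used in _wrap_app():

from oslo_config import cfg
from oslo_middleware import sizelimit


def api(environ, start_response):
    # Stand-in WSGI application for illustration only.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'ok']


# Oversized request bodies are rejected before api() is ever invoked.
app = sizelimit.RequestBodySizeLimiter(api, cfg.CONF)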