octavia 14.0.0.0rc1__py3-none-any.whl → 14.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- octavia/amphorae/backends/agent/api_server/keepalivedlvs.py +9 -0
- octavia/amphorae/backends/agent/api_server/osutils.py +4 -2
- octavia/amphorae/backends/agent/api_server/plug.py +5 -4
- octavia/amphorae/backends/agent/api_server/server.py +3 -2
- octavia/amphorae/backends/agent/api_server/util.py +35 -2
- octavia/amphorae/backends/utils/interface.py +2 -37
- octavia/amphorae/backends/utils/interface_file.py +23 -10
- octavia/amphorae/backends/utils/nftable_utils.py +33 -11
- octavia/amphorae/drivers/keepalived/jinja/jinja_cfg.py +0 -1
- octavia/amphorae/drivers/keepalived/jinja/templates/keepalived_base.template +0 -1
- octavia/api/common/pagination.py +1 -1
- octavia/api/v2/controllers/health_monitor.py +3 -1
- octavia/api/v2/controllers/load_balancer.py +7 -0
- octavia/api/v2/controllers/member.py +12 -2
- octavia/common/clients.py +7 -1
- octavia/common/constants.py +3 -3
- octavia/controller/worker/v2/controller_worker.py +2 -2
- octavia/controller/worker/v2/flows/amphora_flows.py +14 -3
- octavia/controller/worker/v2/flows/flow_utils.py +6 -4
- octavia/controller/worker/v2/flows/listener_flows.py +17 -5
- octavia/controller/worker/v2/tasks/database_tasks.py +10 -6
- octavia/controller/worker/v2/tasks/network_tasks.py +12 -13
- octavia/db/base_models.py +16 -4
- octavia/network/drivers/neutron/allowed_address_pairs.py +3 -2
- octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py +1 -1
- octavia/tests/functional/api/v2/test_health_monitor.py +18 -0
- octavia/tests/functional/api/v2/test_member.py +32 -0
- octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py +1 -1
- octavia/tests/unit/amphorae/backends/agent/api_server/test_plug.py +4 -3
- octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py +89 -1
- octavia/tests/unit/amphorae/backends/utils/test_interface.py +3 -64
- octavia/tests/unit/amphorae/backends/utils/test_nftable_utils.py +28 -22
- octavia/tests/unit/amphorae/drivers/keepalived/jinja/test_jinja_cfg.py +0 -4
- octavia/tests/unit/api/common/test_pagination.py +78 -1
- octavia/tests/unit/cmd/test_prometheus_proxy.py +8 -1
- octavia/tests/unit/controller/worker/v2/flows/test_listener_flows.py +10 -15
- octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py +4 -6
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py +28 -6
- octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py +57 -2
- octavia/tests/unit/controller/worker/v2/test_controller_worker.py +56 -1
- octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py +2 -1
- {octavia-14.0.0.0rc1.dist-info → octavia-14.0.1.dist-info}/AUTHORS +5 -0
- octavia-14.0.1.dist-info/METADATA +156 -0
- {octavia-14.0.0.0rc1.dist-info → octavia-14.0.1.dist-info}/RECORD +59 -59
- {octavia-14.0.0.0rc1.dist-info → octavia-14.0.1.dist-info}/WHEEL +1 -1
- {octavia-14.0.0.0rc1.dist-info → octavia-14.0.1.dist-info}/entry_points.txt +0 -1
- octavia-14.0.1.dist-info/pbr.json +1 -0
- octavia-14.0.0.0rc1.dist-info/METADATA +0 -158
- octavia-14.0.0.0rc1.dist-info/pbr.json +0 -1
- {octavia-14.0.0.0rc1.data → octavia-14.0.1.data}/data/share/octavia/LICENSE +0 -0
- {octavia-14.0.0.0rc1.data → octavia-14.0.1.data}/data/share/octavia/README.rst +0 -0
- {octavia-14.0.0.0rc1.data → octavia-14.0.1.data}/data/share/octavia/diskimage-create/README.rst +0 -0
- {octavia-14.0.0.0rc1.data → octavia-14.0.1.data}/data/share/octavia/diskimage-create/diskimage-create.sh +0 -0
- {octavia-14.0.0.0rc1.data → octavia-14.0.1.data}/data/share/octavia/diskimage-create/image-tests.sh +0 -0
- {octavia-14.0.0.0rc1.data → octavia-14.0.1.data}/data/share/octavia/diskimage-create/requirements.txt +0 -0
- {octavia-14.0.0.0rc1.data → octavia-14.0.1.data}/data/share/octavia/diskimage-create/test-requirements.txt +0 -0
- {octavia-14.0.0.0rc1.data → octavia-14.0.1.data}/data/share/octavia/diskimage-create/tox.ini +0 -0
- {octavia-14.0.0.0rc1.data → octavia-14.0.1.data}/data/share/octavia/diskimage-create/version.txt +0 -0
- {octavia-14.0.0.0rc1.data → octavia-14.0.1.data}/scripts/octavia-wsgi +0 -0
- {octavia-14.0.0.0rc1.dist-info → octavia-14.0.1.dist-info}/LICENSE +0 -0
- {octavia-14.0.0.0rc1.dist-info → octavia-14.0.1.dist-info}/top_level.txt +0 -0
octavia/amphorae/backends/agent/api_server/keepalivedlvs.py
CHANGED
@@ -230,6 +230,15 @@ class KeepalivedLvs(lvs_listener_base.LvsListenerApiServerBase):
                             .format(action, listener_id)),
                 'details': e.output}, status=500)
 
+        is_vrrp = (CONF.controller_worker.loadbalancer_topology ==
+                   consts.TOPOLOGY_ACTIVE_STANDBY)
+        # TODO(gthiemonge) remove RESTART from the list (same as previous todo
+        # in this function)
+        if not is_vrrp and action in [consts.AMP_ACTION_START,
+                                      consts.AMP_ACTION_RESTART,
+                                      consts.AMP_ACTION_RELOAD]:
+            util.send_vip_advertisements(listener_id=listener_id)
+
         return webob.Response(
             json={'message': 'OK',
                   'details': 'keepalivedlvs listener {listener_id} '
octavia/amphorae/backends/agent/api_server/osutils.py
CHANGED
@@ -75,11 +75,13 @@ class BaseOS(object):
             is_sriov=is_sriov)
         vip_interface.write()
 
-    def write_port_interface_file(self, interface, fixed_ips, mtu):
+    def write_port_interface_file(self, interface, fixed_ips, mtu,
+                                  is_sriov=False):
         port_interface = interface_file.PortInterfaceFile(
             name=interface,
             mtu=mtu,
-            fixed_ips=fixed_ips)
+            fixed_ips=fixed_ips,
+            is_sriov=is_sriov)
         port_interface.write()
 
     @classmethod
octavia/amphorae/backends/agent/api_server/plug.py
CHANGED
@@ -152,7 +152,7 @@ class Plug(object):
                 socket.inet_pton(socket.AF_INET6, ip.get('ip_address'))
 
     def plug_network(self, mac_address, fixed_ips, mtu=None,
-                     vip_net_info=None):
+                     vip_net_info=None, is_sriov=False):
         try:
             self._check_ip_addresses(fixed_ips=fixed_ips)
         except socket.error:
@@ -189,7 +189,8 @@
                 vips=rendered_vips,
                 mtu=mtu,
                 vrrp_info=vrrp_info,
-                fixed_ips=fixed_ips)
+                fixed_ips=fixed_ips,
+                is_sriov=is_sriov)
             self._osutils.bring_interface_up(existing_interface, 'vip')
         # Otherwise, we are just plugging a run-of-the-mill network
         else:
@@ -197,7 +198,7 @@
             self._osutils.write_port_interface_file(
                 interface=existing_interface,
                 fixed_ips=fixed_ips,
-                mtu=mtu)
+                mtu=mtu, is_sriov=is_sriov)
             self._osutils.bring_interface_up(existing_interface, 'network')
 
         util.send_member_advertisements(fixed_ips)
@@ -222,7 +223,7 @@
         self._osutils.write_port_interface_file(
             interface=netns_interface,
             fixed_ips=fixed_ips,
-            mtu=mtu)
+            mtu=mtu, is_sriov=is_sriov)
 
         # Update the list of interfaces to add to the namespace
         self._update_plugged_interfaces_file(netns_interface, mac_address)
octavia/amphorae/backends/agent/api_server/server.py
CHANGED
@@ -223,7 +223,8 @@ class Server(object):
        return self._plug.plug_network(port_info['mac_address'],
                                       port_info.get('fixed_ips'),
                                       port_info.get('mtu'),
-                                      port_info.get('vip_net_info'))
+                                      port_info.get('vip_net_info'),
+                                      port_info.get('is_sriov'))
 
    def upload_cert(self):
        return certificate_update.upload_server_cert()
@@ -278,7 +279,7 @@ class Server(object):
            raise exceptions.BadRequest(
                description='Invalid rules information') from e
 
-        nftable_utils.write_nftable_vip_rules_file(interface, rules_info)
+        nftable_utils.write_nftable_rules_file(interface, rules_info)
 
        nftable_utils.load_nftables_file()
 
octavia/amphorae/backends/agent/api_server/util.py
CHANGED
@@ -392,7 +392,37 @@ def get_haproxy_vip_addresses(lb_id):
     return vips
 
 
-def send_vip_advertisements(lb_id):
+def get_lvs_vip_addresses(listener_id: str) -> tp.List[str]:
+    """Get the VIP addresses for a LVS load balancer.
+
+    :param listener_id: The listener ID to get VIP addresses from.
+    :returns: List of VIP addresses (IPv4 and IPv6)
+    """
+    vips = []
+    # Extract the VIP addresses from keepalived configuration
+    # Format is
+    # virtual_server_group ipv<n>-group {
+    #     vip_address1 port1
+    #     vip_address2 port2
+    # }
+    # it can be repeated in case of dual-stack LBs
+    with open(keepalived_lvs_cfg_path(listener_id), encoding='utf-8') as file:
+        vsg_section = False
+        for line in file:
+            current_line = line.strip()
+            if vsg_section:
+                if current_line.startswith('}'):
+                    vsg_section = False
+                else:
+                    vip_address = current_line.split(' ')[0]
+                    vips.append(vip_address)
+            elif line.startswith('virtual_server_group '):
+                vsg_section = True
+    return vips
+
+
+def send_vip_advertisements(lb_id: tp.Optional[str] = None,
+                            listener_id: tp.Optional[str] = None):
     """Sends address advertisements for each load balancer VIP.
 
     This method will send either GARP (IPv4) or neighbor advertisements (IPv6)
@@ -402,7 +432,10 @@ def send_vip_advertisements(lb_id):
     :returns: None
     """
     try:
-        vips = get_haproxy_vip_addresses(lb_id)
+        if lb_id:
+            vips = get_haproxy_vip_addresses(lb_id)
+        else:
+            vips = get_lvs_vip_addresses(listener_id)
 
         for vip in vips:
             interface = network_utils.get_interface_name(
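For reference, the new get_lvs_vip_addresses() helper scans the keepalived configuration rendered for a UDP/SCTP (LVS) listener and collects every address listed inside a virtual_server_group block. Below is a minimal, standalone sketch of that parsing loop run against a hypothetical dual-stack configuration; the sample lines, addresses and ports are illustrative only, not taken from a real amphora.

    # Standalone sketch of the virtual_server_group parsing added above.
    # The sample configuration lines are illustrative only.
    sample_cfg_lines = [
        'virtual_server_group ipv4-group {',
        '    203.0.113.10 80',
        '}',
        'virtual_server_group ipv6-group {',
        '    2001:db8::10 80',
        '}',
    ]

    def parse_vip_addresses(cfg_lines):
        vips = []
        vsg_section = False
        for line in cfg_lines:
            current_line = line.strip()
            if vsg_section:
                if current_line.startswith('}'):
                    vsg_section = False
                else:
                    # The first token of each entry is the VIP address
                    vips.append(current_line.split(' ')[0])
            elif line.startswith('virtual_server_group '):
                vsg_section = True
        return vips

    print(parse_vip_addresses(sample_cfg_lines))
    # ['203.0.113.10', '2001:db8::10']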
octavia/amphorae/backends/utils/interface.py
CHANGED
@@ -176,47 +176,12 @@ class InterfaceController(object):
         ip_network = ipaddress.ip_network(address, strict=False)
         return ip_network.compressed
 
-    def _setup_nftables_chain(self, interface):
-        # TODO(johnsom) Move this to pyroute2 when the nftables library
-        # improves.
-
-        # Create the nftable
-        cmd = [consts.NFT_CMD, consts.NFT_ADD, 'table', consts.NFT_FAMILY,
-               consts.NFT_VIP_TABLE]
-        try:
-            subprocess.check_output(cmd, stderr=subprocess.STDOUT)
-        except Exception as e:
-            if hasattr(e, 'output'):
-                LOG.error(e.output)
-            else:
-                LOG.error(e)
-            raise
-
-        # Create the chain with -310 priority to put it in front of the
-        # lvs-masquerade configured chain
-        cmd = [consts.NFT_CMD, consts.NFT_ADD, 'chain', consts.NFT_FAMILY,
-               consts.NFT_VIP_TABLE, consts.NFT_VIP_CHAIN,
-               '{', 'type', 'filter', 'hook', 'ingress', 'device',
-               interface.name, 'priority', consts.NFT_SRIOV_PRIORITY, ';',
-               'policy', 'drop', ';', '}']
-        try:
-            subprocess.check_output(cmd, stderr=subprocess.STDOUT)
-        except Exception as e:
-            if hasattr(e, 'output'):
-                LOG.error(e.output)
-            else:
-                LOG.error(e)
-            raise
-
-        nftable_utils.write_nftable_vip_rules_file(interface.name, [])
-
-        nftable_utils.load_nftables_file()
-
     def up(self, interface):
         LOG.info("Setting interface %s up", interface.name)
 
         if interface.is_sriov:
-            self._setup_nftables_chain(interface)
+            nftable_utils.write_nftable_rules_file(interface.name, [])
+            nftable_utils.load_nftables_file()
 
         with pyroute2.IPRoute() as ipr:
             idx = ipr.link_lookup(ifname=interface.name)[0]
octavia/amphorae/backends/utils/interface_file.py
CHANGED
@@ -227,22 +227,28 @@ class VIPInterfaceFile(InterfaceFile):
                     fixed_ip.get('host_routes', []))
                 self.routes.extend(host_routes)
 
+        if is_sriov:
+            sriov_param = ' sriov'
+        else:
+            sriov_param = ''
+
         for ip_v in ip_versions:
             self.scripts[consts.IFACE_UP].append({
                 consts.COMMAND: (
-                    "/usr/local/bin/lvs-masquerade.sh add {} {}".format(
-                        'ipv6' if ip_v == 6 else 'ipv4', name))
+                    "/usr/local/bin/lvs-masquerade.sh add {} {}{}".format(
+                        'ipv6' if ip_v == 6 else 'ipv4', name, sriov_param))
             })
             self.scripts[consts.IFACE_DOWN].append({
                 consts.COMMAND: (
-                    "/usr/local/bin/lvs-masquerade.sh delete {} {}".format(
-                        'ipv6' if ip_v == 6 else 'ipv4', name))
+                    "/usr/local/bin/lvs-masquerade.sh delete {} {}{}".format(
+                        'ipv6' if ip_v == 6 else 'ipv4', name, sriov_param))
             })
 
 
 class PortInterfaceFile(InterfaceFile):
-    def __init__(self, name, mtu, fixed_ips):
-        super().__init__(name, if_type=consts.BACKEND, mtu=mtu)
+    def __init__(self, name, mtu, fixed_ips, is_sriov=False):
+        super().__init__(name, if_type=consts.BACKEND, mtu=mtu,
+                         is_sriov=is_sriov)
 
         if fixed_ips:
             ip_versions = set()
@@ -271,14 +277,21 @@ class PortInterfaceFile(InterfaceFile):
                     consts.IPV6AUTO: True
                 })
 
+        if is_sriov:
+            sriov_param = ' sriov'
+        else:
+            sriov_param = ''
+
         for ip_version in ip_versions:
             self.scripts[consts.IFACE_UP].append({
                 consts.COMMAND: (
-                    "/usr/local/bin/lvs-masquerade.sh add {} {}".format(
-                        'ipv6' if ip_version == 6 else 'ipv4', name))
+                    "/usr/local/bin/lvs-masquerade.sh add {} {}{}".format(
+                        'ipv6' if ip_version == 6 else 'ipv4', name,
+                        sriov_param))
             })
             self.scripts[consts.IFACE_DOWN].append({
                 consts.COMMAND: (
-                    "/usr/local/bin/lvs-masquerade.sh delete {} {}".format(
-                        'ipv6' if ip_version == 6 else 'ipv4', name))
+                    "/usr/local/bin/lvs-masquerade.sh delete {} {}{}".format(
+                        'ipv6' if ip_version == 6 else 'ipv4', name,
+                        sriov_param))
             })
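With is_sriov plumbed through both interface-file classes, the generated up/down scripts differ only by a trailing ' sriov' argument to lvs-masquerade.sh. A quick sketch of the resulting command strings; the interface name eth1 is just an example:

    # Illustrative only: the command strings built by the updated code paths.
    def masquerade_cmd(action, ip_version, name, is_sriov=False):
        sriov_param = ' sriov' if is_sriov else ''
        return "/usr/local/bin/lvs-masquerade.sh {} {} {}{}".format(
            action, 'ipv6' if ip_version == 6 else 'ipv4', name, sriov_param)

    print(masquerade_cmd('add', 4, 'eth1'))
    # /usr/local/bin/lvs-masquerade.sh add ipv4 eth1
    print(masquerade_cmd('add', 6, 'eth1', is_sriov=True))
    # /usr/local/bin/lvs-masquerade.sh add ipv6 eth1 sriov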
octavia/amphorae/backends/utils/nftable_utils.py
CHANGED
@@ -26,16 +26,26 @@ from octavia.common import utils
 LOG = logging.getLogger(__name__)
 
 
-def write_nftable_vip_rules_file(interface_name, rules):
+def write_nftable_rules_file(interface_name, rules):
     flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
     # mode 00600
     mode = stat.S_IRUSR | stat.S_IWUSR
 
     # Create some strings shared on both code paths
-    table_string = f'table {consts.NFT_FAMILY} {consts.NFT_VIP_TABLE} {{\n'
-    chain_string = f'    chain {consts.NFT_VIP_CHAIN} {{\n'
-    hook_string = (f'        type filter hook ingress device {interface_name} '
-                   f'priority {consts.NFT_SRIOV_PRIORITY}; policy drop;\n')
+    table_string = f'table {consts.NFT_FAMILY} {consts.NFT_TABLE} {{\n'
+    chain_string = f'    chain {consts.NFT_CHAIN} {{\n'
+    vip_chain_string = f'    chain {consts.NFT_VIP_CHAIN} {{\n'
+    hook_string = ('        type filter hook input priority filter; '
+                   'policy drop;\n')
+
+    # Conntrack is used to allow flow return traffic
+    conntrack_string = ('        ct state vmap { established : accept, '
+                        'related : accept, invalid : drop }\n')
+
+    # Allow loopback traffic on the loopback interface, no where else
+    loopback_string = '        iif lo accept\n'
+    loopback_addr_string = '        ip saddr 127.0.0.0/8 drop\n'
+    loopback_ipv6_addr_string = '        ip6 saddr ::1 drop\n'
 
     # Allow ICMP destination unreachable for PMTUD
     icmp_string = '        icmp type destination-unreachable accept\n'
@@ -47,38 +57,50 @@ def write_nftable_vip_rules_file(interface_name, rules):
     dhcp_string = '        udp sport 67 udp dport 68 accept\n'
     dhcpv6_string = '        udp sport 547 udp dport 546 accept\n'
 
+    # If the packet came in on the VIP interface, goto the VIP rules chain
+    vip_interface_goto_string = (
+        f'        iifname {consts.NETNS_PRIMARY_INTERFACE} '
+        f'goto {consts.NFT_VIP_CHAIN}\n')
+
     # Check if an existing rules file exists or we be need to create an
     # "drop all" file with no rules except for VRRP. If it exists, we should
     # not overwrite it here as it could be a reboot unless we were passed new
     # rules.
-    if os.path.isfile(consts.NFT_VIP_RULES_FILE):
+    if os.path.isfile(consts.NFT_RULES_FILE):
         if not rules:
             return
         with os.fdopen(
-                os.open(consts.NFT_VIP_RULES_FILE, flags, mode), 'w') as file:
+                os.open(consts.NFT_RULES_FILE, flags, mode), 'w') as file:
             # Clear the existing rules in the kernel
            # Note: The "nft -f" method is atomic, so clearing the rules will
            # not leave the amphora exposed.
            # Create and delete the table to not get errors if the table does
            # not exist yet.
-            file.write(f'table {consts.NFT_FAMILY} {consts.NFT_VIP_TABLE} '
+            file.write(f'table {consts.NFT_FAMILY} {consts.NFT_TABLE} '
                        '{}\n')
            file.write(f'delete table {consts.NFT_FAMILY} '
-                       f'{consts.NFT_VIP_TABLE}\n')
+                       f'{consts.NFT_TABLE}\n')
            file.write(table_string)
            file.write(chain_string)
            file.write(hook_string)
+            file.write(conntrack_string)
+            file.write(loopback_string)
+            file.write(loopback_addr_string)
+            file.write(loopback_ipv6_addr_string)
            file.write(icmp_string)
            file.write(icmpv6_string)
            file.write(dhcp_string)
            file.write(dhcpv6_string)
+            file.write(vip_interface_goto_string)
+            file.write('    }\n')  # close the chain
+            file.write(vip_chain_string)
            for rule in rules:
                file.write(f'        {_build_rule_cmd(rule)}\n')
            file.write('    }\n')  # close the chain
            file.write('}\n')  # close the table
    else:  # No existing rules, create the "drop all" base rules
        with os.fdopen(
-                os.open(consts.NFT_VIP_RULES_FILE, flags, mode), 'w') as file:
+                os.open(consts.NFT_RULES_FILE, flags, mode), 'w') as file:
            file.write(table_string)
            file.write(chain_string)
            file.write(hook_string)
@@ -113,7 +135,7 @@ def _build_rule_cmd(rule):
 
 
 def load_nftables_file():
-    cmd = [consts.NFT_CMD, '-o', '-f', consts.NFT_VIP_RULES_FILE]
+    cmd = [consts.NFT_CMD, '-o', '-f', consts.NFT_RULES_FILE]
     try:
         with network_namespace.NetworkNamespace(consts.AMPHORA_NAMESPACE):
             subprocess.check_output(cmd, stderr=subprocess.STDOUT)
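Putting the pieces of the rewritten write_nftable_rules_file() together: it now emits a base input-hook chain (conntrack, loopback, ICMP and DHCP allowances) that sends traffic arriving on the VIP interface to a separate amphora_vip_chain where the per-listener rules land. A hand-assembled sketch of roughly what the file written on the "new rules" path contains, using the constant values from the constants.py hunk below and assuming eth1 for consts.NETNS_PRIMARY_INTERFACE (this is an approximation, not output copied from a real amphora, and several of the allowance lines are omitted for brevity):

    # Hand-assembled approximation of the generated nftables rules file.
    # 'eth1' for NETNS_PRIMARY_INTERFACE is an assumption.
    expected = (
        'table inet amphora_table {}\n'
        'delete table inet amphora_table\n'
        'table inet amphora_table {\n'
        '    chain amphora_chain {\n'
        '        type filter hook input priority filter; policy drop;\n'
        '        ct state vmap { established : accept, related : accept, '
        'invalid : drop }\n'
        '        iif lo accept\n'
        '        icmp type destination-unreachable accept\n'
        '        udp sport 67 udp dport 68 accept\n'
        '        iifname eth1 goto amphora_vip_chain\n'
        '    }\n'
        '    chain amphora_vip_chain {\n'
        '        # per-listener rules from the controller land here\n'
        '    }\n'
        '}\n')
    print(expected)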
octavia/amphorae/drivers/keepalived/jinja/jinja_cfg.py
CHANGED
@@ -123,7 +123,6 @@ class KeepalivedJinjaTemplater(object):
             peers_ips.append(amp.vrrp_ip)
         return self.get_template(self.keepalived_template).render(
             {'vrrp_group_name': loadbalancer.vrrp_group.vrrp_group_name,
-             'amp_role': amphora.role,
              'amp_intf': amphora.vrrp_interface,
             'amp_vrrp_id': amphora.vrrp_id,
             'amp_priority': amphora.vrrp_priority,
octavia/api/common/pagination.py
CHANGED
@@ -169,7 +169,7 @@ class PaginationHelper(object):
        # TODO(rm_work) Do we need to know when there are more vs exact?
        # We safely know if we have a full page, but it might include the
        # last element or it might not, it is unclear
-        if len(model_list) >= self.limit:
+        if self.limit is None or len(model_list) >= self.limit:
            next_attr.append("marker={}".format(model_list[-1].get('id')))
            next_link = {
                "rel": "next",
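The pagination change guards the page-size comparison so it short-circuits when no limit is set; in Python 3, len(model_list) >= None would otherwise raise a TypeError. A minimal illustration of the guarded comparison (the function and names below are ad hoc, not Octavia API):

    # Ad hoc illustration of the guarded comparison from the hunk above.
    def wants_next_marker(model_list, limit):
        # Short-circuits when limit is None, avoiding
        # "'>=' not supported between instances of 'int' and 'NoneType'".
        return limit is None or len(model_list) >= limit

    print(wants_next_marker([{'id': 'a'}, {'id': 'b'}], None))  # True
    print(wants_next_marker([{'id': 'a'}], 2))                  # False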
octavia/api/v2/controllers/health_monitor.py
CHANGED
@@ -188,7 +188,9 @@ class HealthMonitorController(base.BaseController):
            request.type == consts.HEALTH_MONITOR_UDP_CONNECT)
        conf_min_delay = (
            CONF.api_settings.udp_connect_min_interval_health_monitor)
-        if hm_is_type_udp and request.delay < conf_min_delay:
+        if (hm_is_type_udp and
+                not isinstance(request.delay, wtypes.UnsetType) and
+                request.delay < conf_min_delay):
            raise exceptions.ValidationException(detail=_(
                "The request delay value %(delay)s should be larger than "
                "%(conf_min_delay)s for %(type)s health monitor type.") % {
octavia/api/v2/controllers/load_balancer.py
CHANGED
@@ -558,6 +558,13 @@ class LoadBalancersController(base.BaseController):
                    subnet_id=add_vip.subnet_id)
 
        if listeners or pools:
+            # expire_all is required here, it ensures that the loadbalancer
+            # will be re-fetched with its associated vip in _graph_create.
+            # without expire_all the vip attributes that have been updated
+            # just before this call may not be set correctly in the
+            # loadbalancer object.
+            lock_session.expire_all()
+
            db_pools, db_lists = self._graph_create(
                lock_session, db_lb, listeners, pools)
 
octavia/api/v2/controllers/member.py
CHANGED
@@ -31,6 +31,7 @@ from octavia.common import data_models
 from octavia.common import exceptions
 from octavia.common import validate
 from octavia.db import prepare as db_prepare
+from octavia.i18n import _
 
 
 LOG = logging.getLogger(__name__)
@@ -365,12 +366,21 @@ class MembersController(MemberController):
        # Find members that are brand new or updated
        new_members = []
        updated_members = []
+        updated_member_uniques = set()
        for m in members:
-            if (m.address, m.protocol_port) not in old_member_uniques:
+            key = (m.address, m.protocol_port)
+            if key not in old_member_uniques:
                validate.ip_not_reserved(m.address)
                new_members.append(m)
            else:
-                m.id = old_member_uniques[(m.address, m.protocol_port)]
+                m.id = old_member_uniques[key]
+                if key in updated_member_uniques:
+                    LOG.error("Member %s is updated multiple times in "
+                              "the same batch request.", m.id)
+                    raise exceptions.ValidationException(
+                        detail=_("Member must be updated only once in the "
+                                 "same request."))
+                updated_member_uniques.add(key)
                updated_members.append(m)
 
        # Find members that are deleted
octavia/common/clients.py
CHANGED
@@ -111,6 +111,7 @@ class NeutronAuth(object):
        client.
        """
        sess = keystone.KeystoneSession('neutron').get_session()
+        kwargs = {}
        neutron_endpoint = CONF.neutron.endpoint_override
        if neutron_endpoint is None:
            endpoint_data = sess.get_endpoint_data(
@@ -119,8 +120,13 @@
                region_name=CONF.neutron.region_name)
            neutron_endpoint = endpoint_data.catalog_url
 
+        neutron_cafile = getattr(CONF.neutron, "cafile", None)
+        insecure = getattr(CONF.neutron, "insecure", False)
+        kwargs['verify'] = not insecure
+        if neutron_cafile is not None and not insecure:
+            kwargs['verify'] = neutron_cafile
        user_auth = token_endpoint.Token(neutron_endpoint, context.auth_token)
-        user_sess = session.Session(auth=user_auth)
+        user_sess = session.Session(auth=user_auth, **kwargs)
 
        conn = openstack.connection.Connection(
            session=user_sess, oslo_conf=CONF)
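The clients.py change makes the user-scoped neutron session honor the [neutron] cafile and insecure options: keystoneauth's Session accepts verify as either a boolean or a CA bundle path (requests semantics), so the derived kwarg behaves as sketched below.

    # Sketch mirroring how the 'verify' kwarg is derived in the hunk above.
    def neutron_session_verify(cafile, insecure):
        verify = not insecure
        if cafile is not None and not insecure:
            verify = cafile  # CA bundle path overrides the plain True
        return verify

    print(neutron_session_verify(None, False))                # True
    print(neutron_session_verify('/path/to/ca.pem', False))   # '/path/to/ca.pem'
    print(neutron_session_verify('/path/to/ca.pem', True))    # False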
octavia/common/constants.py
CHANGED
@@ -979,8 +979,8 @@ AMP_NET_DIR_TEMPLATE = '/etc/octavia/interfaces/'
 NFT_ADD = 'add'
 NFT_CMD = '/usr/sbin/nft'
 NFT_FAMILY = 'inet'
-NFT_VIP_RULES_FILE = '/var/lib/octavia/nftables-vip.rules'
-NFT_VIP_TABLE = 'amphora_vip_table'
+NFT_RULES_FILE = '/var/lib/octavia/nftables-vip.rules'
+NFT_TABLE = 'amphora_table'
+NFT_CHAIN = 'amphora_chain'
 NFT_VIP_CHAIN = 'amphora_vip_chain'
-NFT_SRIOV_PRIORITY = '-310'
 PROTOCOL = 'protocol'
octavia/controller/worker/v2/controller_worker.py
CHANGED
@@ -431,8 +431,8 @@ class ControllerWorker(object):
                        constants.SERVER_GROUP_ID: db_lb.server_group_id,
                        constants.PROJECT_ID: db_lb.project_id}
        if cascade:
-            listeners = flow_utils.get_listeners_on_lb(db_lb)
-            pools = flow_utils.get_pools_on_lb(db_lb)
+            listeners = flow_utils.get_listeners_on_lb(db_lb, True)
+            pools = flow_utils.get_pools_on_lb(db_lb, True)
 
        self.run_flow(
            flow_utils.get_cascade_delete_load_balancer_flow,
octavia/controller/worker/v2/flows/amphora_flows.py
CHANGED
@@ -404,7 +404,7 @@ class AmphoraFlows(object):
    def get_amphora_for_lb_failover_subflow(
            self, prefix, role=constants.ROLE_STANDALONE,
            failed_amp_vrrp_port_id=None, is_vrrp_ipv6=False,
-            flavor_dict=None):
+            flavor_dict=None, timeout_dict=None):
        """Creates a new amphora that will be used in a failover flow.
 
        :requires: loadbalancer_id, flavor, vip, vip_sg_id, loadbalancer
@@ -488,13 +488,24 @@
                rebind={constants.AMPHORAE: constants.NEW_AMPHORAE},
                provides=constants.AMPHORA_FIREWALL_RULES,
                inject={constants.AMPHORA_INDEX: 0}))
+            amp_for_failover_flow.add(
+                amphora_driver_tasks.AmphoraeGetConnectivityStatus(
+                    name=(prefix + '-' +
+                          constants.AMPHORAE_GET_CONNECTIVITY_STATUS),
+                    requires=constants.AMPHORAE,
+                    rebind={constants.AMPHORAE: constants.NEW_AMPHORAE},
+                    inject={constants.TIMEOUT_DICT: timeout_dict,
+                            constants.NEW_AMPHORA_ID: constants.NIL_UUID},
+                    provides=constants.AMPHORAE_STATUS))
            amp_for_failover_flow.add(
                amphora_driver_tasks.SetAmphoraFirewallRules(
                    name=prefix + '-' + constants.SET_AMPHORA_FIREWALL_RULES,
                    requires=(constants.AMPHORAE,
-                              constants.AMPHORA_FIREWALL_RULES),
+                              constants.AMPHORA_FIREWALL_RULES,
+                              constants.AMPHORAE_STATUS),
                    rebind={constants.AMPHORAE: constants.NEW_AMPHORAE},
-                    inject={constants.AMPHORA_INDEX: 0}))
+                    inject={constants.AMPHORA_INDEX: 0,
+                            constants.TIMEOUT_DICT: timeout_dict}))
 
        # Plug member ports
        amp_for_failover_flow.add(network_tasks.CalculateAmphoraDelta(
octavia/controller/worker/v2/flows/flow_utils.py
CHANGED
@@ -41,29 +41,31 @@ def get_delete_load_balancer_flow(lb):
    return LB_FLOWS.get_delete_load_balancer_flow(lb)
 
 
-def get_listeners_on_lb(db_lb):
+def get_listeners_on_lb(db_lb, for_delete=False):
    """Get a list of the listeners on a load balancer.
 
    :param db_lb: A load balancer database model object.
+    :param for_delete: Skip errors on tls certs loading.
    :returns: A list of provider dict format listeners.
    """
    listener_dicts = []
    for listener in db_lb.listeners:
        prov_listener = provider_utils.db_listener_to_provider_listener(
-            listener)
+            listener, for_delete)
        listener_dicts.append(prov_listener.to_dict())
    return listener_dicts
 
 
-def get_pools_on_lb(db_lb):
+def get_pools_on_lb(db_lb, for_delete=False):
    """Get a list of the pools on a load balancer.
 
    :param db_lb: A load balancer database model object.
+    :param for_delete: Skip errors on tls certs loading.
    :returns: A list of provider dict format pools.
    """
    pool_dicts = []
    for pool in db_lb.pools:
-        prov_pool = provider_utils.db_pool_to_provider_pool(pool)
+        prov_pool = provider_utils.db_pool_to_provider_pool(pool, for_delete)
        pool_dicts.append(prov_pool.to_dict())
    return pool_dicts
 
octavia/controller/worker/v2/flows/listener_flows.py
CHANGED
@@ -161,7 +161,7 @@ class ListenerFlows(object):
 
        return update_listener_flow
 
-    def _get_firewall_rules_subflow(self, flavor_dict):
+    def _get_firewall_rules_subflow(self, flavor_dict, timeout_dict=None):
        """Creates a subflow that updates the firewall rules in the amphorae.
 
        :returns: The subflow for updating firewall rules in the amphorae.
@@ -174,6 +174,14 @@
            requires=constants.LOADBALANCER_ID,
            provides=constants.AMPHORAE))
 
+        fw_rules_subflow.add(
+            amphora_driver_tasks.AmphoraeGetConnectivityStatus(
+                name=constants.AMPHORAE_GET_CONNECTIVITY_STATUS,
+                requires=constants.AMPHORAE,
+                inject={constants.TIMEOUT_DICT: timeout_dict,
+                        constants.NEW_AMPHORA_ID: constants.NIL_UUID},
+                provides=constants.AMPHORAE_STATUS))
+
        fw_rules_subflow.add(network_tasks.GetAmphoraeNetworkConfigs(
            name=sf_name + '-' + constants.GET_AMP_NETWORK_CONFIG,
            requires=constants.LOADBALANCER_ID,
@@ -192,8 +200,10 @@
 
        amp_0_subflow.add(amphora_driver_tasks.SetAmphoraFirewallRules(
            name=sf_name + '-0-' + constants.SET_AMPHORA_FIREWALL_RULES,
-            requires=(constants.AMPHORAE, constants.AMPHORA_FIREWALL_RULES),
-            inject={constants.AMPHORA_INDEX: 0}))
+            requires=(constants.AMPHORAE, constants.AMPHORA_FIREWALL_RULES,
+                      constants.AMPHORAE_STATUS),
+            inject={constants.AMPHORA_INDEX: 0,
+                    constants.TIMEOUT_DICT: timeout_dict}))
 
        update_amps_subflow.add(amp_0_subflow)
 
@@ -212,8 +222,10 @@
        amp_1_subflow.add(amphora_driver_tasks.SetAmphoraFirewallRules(
            name=sf_name + '-1-' + constants.SET_AMPHORA_FIREWALL_RULES,
            requires=(constants.AMPHORAE,
-                      constants.AMPHORA_FIREWALL_RULES),
-            inject={constants.AMPHORA_INDEX: 1}))
+                      constants.AMPHORA_FIREWALL_RULES,
+                      constants.AMPHORAE_STATUS),
+            inject={constants.AMPHORA_INDEX: 1,
+                    constants.TIMEOUT_DICT: timeout_dict}))
 
        update_amps_subflow.add(amp_1_subflow)
 
octavia/controller/worker/v2/tasks/database_tasks.py
CHANGED
@@ -132,13 +132,17 @@ class CreateAmphoraInDB(BaseDatabaseTask):
        LOG.warning("Reverting create amphora in DB for amp id %s ", result)
 
        # Delete the amphora for now. May want to just update status later
-        try:
-            with db_apis.session().begin() as session:
+        with db_apis.session().begin() as session:
+            try:
                self.amphora_repo.delete(session, id=result)
-        except Exception as e:
-            LOG.error("Failed to delete amphora %(amp)s "
-                      "in the database due to: "
-                      "%(except)s", {'amp': result, 'except': str(e)})
+            except Exception as e:
+                LOG.error("Failed to delete amphora %(amp)s "
+                          "in the database due to: "
+                          "%(except)s", {'amp': result, 'except': str(e)})
+            try:
+                self.amp_health_repo.delete(session, amphora_id=result)
+            except Exception:
+                pass
 
 
 class MarkLBAmphoraeDeletedInDB(BaseDatabaseTask):
|