octavia 12.0.0.0rc2__py3-none-any.whl → 13.0.0.0rc1__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- octavia/amphorae/backends/agent/api_server/osutils.py +1 -0
- octavia/amphorae/backends/agent/api_server/plug.py +21 -7
- octavia/amphorae/backends/agent/api_server/templates/amphora-netns.systemd.j2 +2 -2
- octavia/amphorae/backends/agent/api_server/util.py +21 -0
- octavia/amphorae/backends/health_daemon/health_daemon.py +9 -3
- octavia/amphorae/backends/health_daemon/health_sender.py +2 -0
- octavia/amphorae/backends/utils/interface.py +14 -6
- octavia/amphorae/backends/utils/interface_file.py +6 -3
- octavia/amphorae/backends/utils/keepalivedlvs_query.py +8 -9
- octavia/amphorae/drivers/driver_base.py +1 -2
- octavia/amphorae/drivers/haproxy/rest_api_driver.py +11 -25
- octavia/amphorae/drivers/health/heartbeat_udp.py +34 -24
- octavia/amphorae/drivers/keepalived/jinja/jinja_cfg.py +3 -12
- octavia/amphorae/drivers/noop_driver/driver.py +3 -5
- octavia/api/common/pagination.py +4 -4
- octavia/api/drivers/amphora_driver/v2/driver.py +11 -5
- octavia/api/drivers/driver_agent/driver_get.py +22 -14
- octavia/api/drivers/driver_agent/driver_updater.py +8 -4
- octavia/api/drivers/utils.py +4 -2
- octavia/api/healthcheck/healthcheck_plugins.py +4 -2
- octavia/api/root_controller.py +4 -1
- octavia/api/v2/controllers/amphora.py +35 -38
- octavia/api/v2/controllers/availability_zone_profiles.py +43 -33
- octavia/api/v2/controllers/availability_zones.py +22 -18
- octavia/api/v2/controllers/flavor_profiles.py +37 -28
- octavia/api/v2/controllers/flavors.py +19 -15
- octavia/api/v2/controllers/health_monitor.py +44 -33
- octavia/api/v2/controllers/l7policy.py +52 -40
- octavia/api/v2/controllers/l7rule.py +68 -55
- octavia/api/v2/controllers/listener.py +88 -61
- octavia/api/v2/controllers/load_balancer.py +52 -34
- octavia/api/v2/controllers/member.py +63 -52
- octavia/api/v2/controllers/pool.py +55 -42
- octavia/api/v2/controllers/quotas.py +5 -3
- octavia/api/v2/types/listener.py +15 -0
- octavia/cmd/octavia_worker.py +0 -3
- octavia/cmd/status.py +1 -4
- octavia/common/clients.py +25 -45
- octavia/common/config.py +64 -22
- octavia/common/constants.py +3 -2
- octavia/common/data_models.py +7 -1
- octavia/common/jinja/haproxy/combined_listeners/jinja_cfg.py +12 -1
- octavia/common/jinja/haproxy/combined_listeners/templates/macros.j2 +5 -2
- octavia/common/jinja/lvs/jinja_cfg.py +4 -2
- octavia/common/keystone.py +58 -5
- octavia/common/validate.py +35 -0
- octavia/compute/drivers/noop_driver/driver.py +6 -0
- octavia/controller/healthmanager/health_manager.py +3 -6
- octavia/controller/housekeeping/house_keeping.py +36 -37
- octavia/controller/worker/amphora_rate_limit.py +5 -4
- octavia/controller/worker/task_utils.py +57 -41
- octavia/controller/worker/v2/controller_worker.py +160 -103
- octavia/controller/worker/v2/flows/listener_flows.py +3 -0
- octavia/controller/worker/v2/flows/load_balancer_flows.py +9 -14
- octavia/controller/worker/v2/tasks/amphora_driver_tasks.py +152 -91
- octavia/controller/worker/v2/tasks/compute_tasks.py +4 -2
- octavia/controller/worker/v2/tasks/database_tasks.py +542 -400
- octavia/controller/worker/v2/tasks/network_tasks.py +119 -79
- octavia/db/api.py +26 -23
- octavia/db/base_models.py +2 -2
- octavia/db/healthcheck.py +2 -1
- octavia/db/migration/alembic_migrations/versions/632152d2d32e_add_http_strict_transport_security_.py +42 -0
- octavia/db/models.py +12 -2
- octavia/db/prepare.py +2 -0
- octavia/db/repositories.py +462 -482
- octavia/hacking/checks.py +1 -1
- octavia/network/base.py +0 -14
- octavia/network/drivers/neutron/allowed_address_pairs.py +92 -135
- octavia/network/drivers/neutron/base.py +65 -77
- octavia/network/drivers/neutron/utils.py +69 -85
- octavia/network/drivers/noop_driver/driver.py +0 -7
- octavia/statistics/drivers/update_db.py +10 -10
- octavia/tests/common/constants.py +91 -84
- octavia/tests/common/sample_data_models.py +13 -1
- octavia/tests/fixtures.py +32 -0
- octavia/tests/functional/amphorae/backend/agent/api_server/test_server.py +9 -10
- octavia/tests/functional/api/drivers/driver_agent/test_driver_agent.py +260 -15
- octavia/tests/functional/api/test_root_controller.py +3 -28
- octavia/tests/functional/api/v2/base.py +5 -3
- octavia/tests/functional/api/v2/test_amphora.py +18 -5
- octavia/tests/functional/api/v2/test_availability_zone_profiles.py +1 -0
- octavia/tests/functional/api/v2/test_listener.py +51 -19
- octavia/tests/functional/api/v2/test_load_balancer.py +10 -1
- octavia/tests/functional/db/base.py +31 -16
- octavia/tests/functional/db/test_models.py +27 -28
- octavia/tests/functional/db/test_repositories.py +407 -50
- octavia/tests/unit/amphorae/backends/agent/api_server/test_amphora_info.py +2 -0
- octavia/tests/unit/amphorae/backends/agent/api_server/test_osutils.py +1 -1
- octavia/tests/unit/amphorae/backends/agent/api_server/test_plug.py +54 -6
- octavia/tests/unit/amphorae/backends/agent/api_server/test_util.py +35 -0
- octavia/tests/unit/amphorae/backends/health_daemon/test_health_daemon.py +8 -0
- octavia/tests/unit/amphorae/backends/health_daemon/test_health_sender.py +18 -0
- octavia/tests/unit/amphorae/backends/utils/test_interface.py +81 -0
- octavia/tests/unit/amphorae/backends/utils/test_interface_file.py +2 -0
- octavia/tests/unit/amphorae/backends/utils/test_keepalivedlvs_query.py +129 -5
- octavia/tests/unit/amphorae/drivers/haproxy/test_rest_api_driver_1_0.py +42 -20
- octavia/tests/unit/amphorae/drivers/health/test_heartbeat_udp.py +18 -20
- octavia/tests/unit/amphorae/drivers/keepalived/jinja/test_jinja_cfg.py +4 -4
- octavia/tests/unit/amphorae/drivers/noop_driver/test_driver.py +4 -1
- octavia/tests/unit/api/drivers/driver_agent/test_driver_get.py +3 -3
- octavia/tests/unit/api/drivers/driver_agent/test_driver_updater.py +11 -13
- octavia/tests/unit/base.py +6 -0
- octavia/tests/unit/cmd/test_interface.py +2 -2
- octavia/tests/unit/cmd/test_status.py +2 -2
- octavia/tests/unit/common/jinja/haproxy/combined_listeners/test_jinja_cfg.py +152 -1
- octavia/tests/unit/common/sample_configs/sample_configs_combined.py +10 -3
- octavia/tests/unit/common/test_clients.py +0 -39
- octavia/tests/unit/common/test_keystone.py +54 -0
- octavia/tests/unit/common/test_validate.py +67 -0
- octavia/tests/unit/controller/healthmanager/test_health_manager.py +8 -22
- octavia/tests/unit/controller/housekeeping/test_house_keeping.py +3 -64
- octavia/tests/unit/controller/worker/test_amphora_rate_limit.py +1 -1
- octavia/tests/unit/controller/worker/test_task_utils.py +44 -24
- octavia/tests/unit/controller/worker/v2/flows/test_load_balancer_flows.py +0 -1
- octavia/tests/unit/controller/worker/v2/tasks/test_amphora_driver_tasks.py +49 -26
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks.py +399 -196
- octavia/tests/unit/controller/worker/v2/tasks/test_database_tasks_quota.py +37 -64
- octavia/tests/unit/controller/worker/v2/tasks/test_network_tasks.py +3 -14
- octavia/tests/unit/controller/worker/v2/test_controller_worker.py +2 -2
- octavia/tests/unit/network/drivers/neutron/test_allowed_address_pairs.py +456 -561
- octavia/tests/unit/network/drivers/neutron/test_base.py +181 -194
- octavia/tests/unit/network/drivers/neutron/test_utils.py +14 -30
- octavia/tests/unit/statistics/drivers/test_update_db.py +7 -5
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/README.rst +1 -1
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/AUTHORS +4 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/METADATA +4 -4
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/RECORD +141 -189
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/entry_points.txt +1 -2
- octavia-13.0.0.0rc1.dist-info/pbr.json +1 -0
- octavia/api/drivers/amphora_driver/v1/__init__.py +0 -11
- octavia/api/drivers/amphora_driver/v1/driver.py +0 -547
- octavia/controller/queue/v1/__init__.py +0 -11
- octavia/controller/queue/v1/consumer.py +0 -64
- octavia/controller/queue/v1/endpoints.py +0 -160
- octavia/controller/worker/v1/__init__.py +0 -11
- octavia/controller/worker/v1/controller_worker.py +0 -1157
- octavia/controller/worker/v1/flows/__init__.py +0 -11
- octavia/controller/worker/v1/flows/amphora_flows.py +0 -610
- octavia/controller/worker/v1/flows/health_monitor_flows.py +0 -105
- octavia/controller/worker/v1/flows/l7policy_flows.py +0 -94
- octavia/controller/worker/v1/flows/l7rule_flows.py +0 -100
- octavia/controller/worker/v1/flows/listener_flows.py +0 -128
- octavia/controller/worker/v1/flows/load_balancer_flows.py +0 -692
- octavia/controller/worker/v1/flows/member_flows.py +0 -230
- octavia/controller/worker/v1/flows/pool_flows.py +0 -127
- octavia/controller/worker/v1/tasks/__init__.py +0 -11
- octavia/controller/worker/v1/tasks/amphora_driver_tasks.py +0 -453
- octavia/controller/worker/v1/tasks/cert_task.py +0 -51
- octavia/controller/worker/v1/tasks/compute_tasks.py +0 -335
- octavia/controller/worker/v1/tasks/database_tasks.py +0 -2756
- octavia/controller/worker/v1/tasks/lifecycle_tasks.py +0 -173
- octavia/controller/worker/v1/tasks/model_tasks.py +0 -41
- octavia/controller/worker/v1/tasks/network_tasks.py +0 -970
- octavia/controller/worker/v1/tasks/retry_tasks.py +0 -74
- octavia/tests/unit/api/drivers/amphora_driver/v1/__init__.py +0 -11
- octavia/tests/unit/api/drivers/amphora_driver/v1/test_driver.py +0 -824
- octavia/tests/unit/controller/queue/v1/__init__.py +0 -11
- octavia/tests/unit/controller/queue/v1/test_consumer.py +0 -61
- octavia/tests/unit/controller/queue/v1/test_endpoints.py +0 -189
- octavia/tests/unit/controller/worker/v1/__init__.py +0 -11
- octavia/tests/unit/controller/worker/v1/flows/__init__.py +0 -11
- octavia/tests/unit/controller/worker/v1/flows/test_amphora_flows.py +0 -474
- octavia/tests/unit/controller/worker/v1/flows/test_health_monitor_flows.py +0 -72
- octavia/tests/unit/controller/worker/v1/flows/test_l7policy_flows.py +0 -67
- octavia/tests/unit/controller/worker/v1/flows/test_l7rule_flows.py +0 -67
- octavia/tests/unit/controller/worker/v1/flows/test_listener_flows.py +0 -91
- octavia/tests/unit/controller/worker/v1/flows/test_load_balancer_flows.py +0 -431
- octavia/tests/unit/controller/worker/v1/flows/test_member_flows.py +0 -106
- octavia/tests/unit/controller/worker/v1/flows/test_pool_flows.py +0 -77
- octavia/tests/unit/controller/worker/v1/tasks/__init__.py +0 -11
- octavia/tests/unit/controller/worker/v1/tasks/test_amphora_driver_tasks.py +0 -792
- octavia/tests/unit/controller/worker/v1/tasks/test_cert_task.py +0 -46
- octavia/tests/unit/controller/worker/v1/tasks/test_compute_tasks.py +0 -634
- octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks.py +0 -2615
- octavia/tests/unit/controller/worker/v1/tasks/test_database_tasks_quota.py +0 -415
- octavia/tests/unit/controller/worker/v1/tasks/test_lifecycle_tasks.py +0 -401
- octavia/tests/unit/controller/worker/v1/tasks/test_model_tasks.py +0 -44
- octavia/tests/unit/controller/worker/v1/tasks/test_network_tasks.py +0 -1788
- octavia/tests/unit/controller/worker/v1/tasks/test_retry_tasks.py +0 -47
- octavia/tests/unit/controller/worker/v1/test_controller_worker.py +0 -2096
- octavia-12.0.0.0rc2.dist-info/pbr.json +0 -1
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/LICENSE +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/README.rst +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/diskimage-create.sh +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/image-tests.sh +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/requirements.txt +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/test-requirements.txt +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/tox.ini +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/data/share/octavia/diskimage-create/version.txt +0 -0
- {octavia-12.0.0.0rc2.data → octavia-13.0.0.0rc1.data}/scripts/octavia-wsgi +0 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/LICENSE +0 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/WHEEL +0 -0
- {octavia-12.0.0.0rc2.dist-info → octavia-13.0.0.0rc1.dist-info}/top_level.txt +0 -0
octavia/amphorae/backends/agent/api_server/plug.py
CHANGED
@@ -14,6 +14,7 @@
 # under the License.
 
 import ipaddress
+import itertools
 import os
 import socket
 import stat
@@ -24,6 +25,7 @@ import pyroute2
 import webob
 from werkzeug import exceptions
 
+from octavia.amphorae.backends.agent.api_server import util
 from octavia.common import constants as consts
 
 
@@ -196,6 +198,8 @@ class Plug(object):
                 fixed_ips=fixed_ips,
                 mtu=mtu)
             self._osutils.bring_interface_up(existing_interface, 'network')
+
+            util.send_member_advertisements(fixed_ips)
             return webob.Response(json={
                 'message': "OK",
                 'details': "Updated existing interface {interface}".format(
@@ -209,13 +213,7 @@ class Plug(object):
 
         # We need to determine the interface name when inside the namespace
         # to avoid name conflicts
-        with pyroute2.NetNS(consts.AMPHORA_NAMESPACE,
-                            flags=os.O_CREAT) as netns:
-
-            # 1 means just loopback, but we should already have a VIP. This
-            # works for the add/delete/add case as we don't delete interfaces
-            # Note, eth0 is skipped because that is the VIP interface
-            netns_interface = 'eth{0}'.format(len(netns.get_links()))
+        netns_interface = self._netns_get_next_interface()
 
         LOG.info('Plugged interface %s will become %s in the namespace %s',
                  default_netns_interface, netns_interface,
@@ -236,6 +234,7 @@ class Plug(object):
                     IFLA_IFNAME=netns_interface)
 
             self._osutils.bring_interface_up(netns_interface, 'network')
+            util.send_member_advertisements(fixed_ips)
 
         return webob.Response(json={
             'message': "OK",
@@ -246,6 +245,8 @@ class Plug(object):
         try:
             with pyroute2.IPRoute() as ipr:
                 idx = ipr.link_lookup(address=mac)[0]
+                # Workaround for https://github.com/PyCQA/pylint/issues/8497
+                # pylint: disable=E1136, E1121
                 addr = ipr.get_links(idx)[0]
                 for attr in addr['attrs']:
                     if attr[0] == consts.IFLA_IFNAME:
@@ -289,3 +290,16 @@ class Plug(object):
 
     def _netns_interface_exists(self, mac_address):
         return self._netns_interface_by_mac(mac_address) is not None
+
+    def _netns_get_next_interface(self):
+        with pyroute2.NetNS(consts.AMPHORA_NAMESPACE,
+                            flags=os.O_CREAT) as netns:
+            existing_ifaces = [
+                dict(link['attrs']).get(consts.IFLA_IFNAME)
+                for link in netns.get_links()]
+            # find the first unused ethXXX
+            for idx in itertools.count(start=2):
+                iface_name = f"eth{idx}"
+                if iface_name not in existing_ifaces:
+                    break
+        return iface_name
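Note: the new `_netns_get_next_interface()` replaces the old `len(netns.get_links())` counting, which assumed the set of plugged interfaces never had gaps; scanning for the first unused name handles unplug/replug sequences. A minimal standalone sketch of the same first-free-name scan (hypothetical helper, not the amphora agent code):

    import itertools

    def first_free_ifname(existing_names, start=2):
        # lo and the VIP device are always present, so member ports
        # start at eth2; probe eth2, eth3, ... until a free name is found
        for idx in itertools.count(start=start):
            name = f"eth{idx}"
            if name not in existing_names:
                return name

    print(first_free_ifname({"lo", "eth0", "eth2"}))  # -> eth3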
octavia/amphorae/backends/agent/api_server/templates/amphora-netns.systemd.j2
CHANGED
@@ -10,6 +10,8 @@ RemainAfterExit=yes
 ExecStart=-/sbin/ip netns add {{ amphora_nsname }}
 # Load the system sysctl into the new namespace
 ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} sysctl --system
+# Enable kernel module ip_vs for lvs function in amphora network namespace
+ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} modprobe ip_vs
 # Set nf_conntrack_buckets sysctl in the main namespace (nf_conntrack_buckets
 # cannot be set in another net namespace, but its value is inherited from the
 # main namespace)
@@ -17,8 +19,6 @@ ExecStart=-/sbin/sysctl -w net.netfilter.nf_conntrack_buckets=125000
 # Update conntrack table sizes using the formula for the default values
 ExecStart=-/sbin/sysctl -w net.netfilter.nf_conntrack_max=125000
 ExecStart=-/sbin/sysctl -w net.netfilter.nf_conntrack_expect_max=488
-# Enable kernel module ip_vs for lvs function in amphora network namespace
-ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} modprobe ip_vs
 # Enable ip_forward and conntrack kernel configuration
 ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} sysctl -w net.ipv4.ip_forward=1
 ExecStart=-/sbin/ip netns exec {{ amphora_nsname }} sysctl -w net.ipv4.vs.conntrack=1
octavia/amphorae/backends/agent/api_server/util.py
CHANGED
@@ -17,6 +17,7 @@ import os
 import re
 import stat
 import subprocess
+import typing as tp
 
 import jinja2
 from oslo_config import cfg
@@ -411,3 +412,23 @@ def send_vip_advertisements(lb_id):
     except Exception as e:
         LOG.debug('Send VIP advertisement failed due to :%s. '
                   'This amphora may not be the MASTER. Ignoring.', str(e))
+
+
+def send_member_advertisements(fixed_ips: tp.Iterable[tp.Dict[str, str]]):
+    """Sends advertisements for each fixed_ip of a list
+
+    This method will send either GARP (IPv4) or neighbor advertisements (IPv6)
+    for the addresses of the subnets of the members.
+
+    :param fixed_ips: a list of dicts that contain 'ip_address' elements
+    :returns: None
+    """
+    try:
+        for fixed_ip in fixed_ips:
+            ip_address = fixed_ip[consts.IP_ADDRESS]
+            interface = network_utils.get_interface_name(
+                ip_address, net_ns=consts.AMPHORA_NAMESPACE)
+            ip_advertisement.send_ip_advertisement(
+                interface, ip_address, net_ns=consts.AMPHORA_NAMESPACE)
+    except Exception as e:
+        LOG.debug('Send member advertisement failed due to: %s', str(e))
octavia/amphorae/backends/health_daemon/health_daemon.py
CHANGED
@@ -157,9 +157,15 @@ def run_sender(cmd_queue):
 
 
 def get_stats(stat_sock_file):
-    stats_query = haproxy_query.HAProxyQuery(stat_sock_file)
-    stats = stats_query.show_stat()
-    pool_status = stats_query.get_pool_status()
+    try:
+        stats_query = haproxy_query.HAProxyQuery(stat_sock_file)
+        stats = stats_query.show_stat()
+        pool_status = stats_query.get_pool_status()
+    except Exception as e:
+        LOG.warning('Unable to query the HAProxy stats (%s) due to: %s',
+                    stat_sock_file, str(e))
+        # Return empty lists so that the heartbeat will still be sent
+        return [], {}
     return stats, pool_status
 
 
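The effect is that a stats failure no longer kills the heartbeat: the daemon falls back to an empty payload instead of raising. A condensed sketch of the fail-open pattern (hypothetical wrapper, using the same show_stat/get_pool_status calls as above):

    def get_stats_safely(query, sock_path):
        try:
            stats = query.show_stat()
            pool_status = query.get_pool_status()
        except Exception as e:  # e.g. socket missing while haproxy restarts
            print(f'stats via {sock_path} unavailable: {e}')
            return [], {}  # empty payload keeps the heartbeat flowing
        return stats, pool_status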
octavia/amphorae/backends/health_daemon/health_sender.py
CHANGED
@@ -70,6 +70,8 @@ class UDPStatusSender(object):
         for ipport in CONF.health_manager.controller_ip_port_list:
             try:
                 ip, port = ipport.rsplit(':', 1)
+                if ip and ip[0] == '[' and ip[-1] == ']':
+                    ip = ip[1:-1]
             except ValueError:
                 LOG.error("Invalid ip and port '%s' in health_manager "
                           "controller_ip_port_list", ipport)
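This makes bracketed IPv6 endpoints (e.g. [2001:db8::1]:5555) usable in controller_ip_port_list. The parsing logic in isolation:

    def split_ip_port(ipport):
        # rsplit on the last ':' so the colons inside an IPv6 address
        # are left alone, then strip the surrounding brackets if present
        ip, port = ipport.rsplit(':', 1)
        if ip and ip[0] == '[' and ip[-1] == ']':
            ip = ip[1:-1]
        return ip, port

    print(split_ip_port('192.0.2.1:5555'))      # ('192.0.2.1', '5555')
    print(split_ip_port('[2001:db8::1]:5555'))  # ('2001:db8::1', '5555')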
octavia/amphorae/backends/utils/interface.py
CHANGED
@@ -181,6 +181,8 @@ class InterfaceController(object):
         with pyroute2.IPRoute() as ipr:
             idx = ipr.link_lookup(ifname=interface.name)[0]
 
+            # Workaround for https://github.com/PyCQA/pylint/issues/8497
+            # pylint: disable=E1136, E1121
             link = ipr.get_links(idx)[0]
             current_state = link.get(consts.STATE)
 
@@ -195,7 +197,9 @@ class InterfaceController(object):
 
             self._addresses_up(interface, ipr, idx)
             self._routes_up(interface, ipr, idx)
-            self._rules_up(interface, ipr, idx)
+            # only the vip port updates the rules
+            if interface.if_type == consts.VIP:
+                self._rules_up(interface, ipr, idx)
 
             self._scripts_up(interface, current_state)
 
@@ -370,15 +374,19 @@ class InterfaceController(object):
         with pyroute2.IPRoute() as ipr:
             idx = ipr.link_lookup(ifname=interface.name)[0]
 
+            # Workaround for https://github.com/PyCQA/pylint/issues/8497
+            # pylint: disable=E1136, E1121
             link = ipr.get_links(idx)[0]
             current_state = link.get(consts.STATE)
 
             if current_state == consts.IFACE_UP:
-                for rule in interface.rules:
-                    rule[consts.FAMILY] = self._family(rule[consts.SRC])
-                    LOG.debug("%s: Deleting rule %s", interface.name, rule)
-                    self._ipr_command(ipr.rule, self.DELETE,
-                                      raise_on_error=False, **rule)
+                # only the vip port updates the rules
+                if interface.if_type == consts.VIP:
+                    for rule in interface.rules:
+                        rule[consts.FAMILY] = self._family(rule[consts.SRC])
+                        LOG.debug("%s: Deleting rule %s", interface.name, rule)
+                        self._ipr_command(ipr.rule, self.DELETE,
+                                          raise_on_error=False, **rule)
 
                 for route in interface.routes:
                     route[consts.FAMILY] = self._family(route[consts.DST])
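A note on the repeated pylint pragmas: E1136 (unsubscriptable-object) and E1121 (too-many-function-args) are false positives that pylint raises when it mis-infers the return type of pyroute2's get_links(), hence the linked upstream issue and the localized disables rather than a code change.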
octavia/amphorae/backends/utils/interface_file.py
CHANGED
@@ -25,9 +25,11 @@ CONF = cfg.CONF
 
 
 class InterfaceFile(object):
-    def __init__(self, name, mtu=None, addresses=None,
+    def __init__(self, name, if_type,
+                 mtu=None, addresses=None,
                  routes=None, rules=None, scripts=None):
         self.name = name
+        self.if_type = if_type
         self.mtu = mtu
         self.addresses = addresses or []
         self.routes = routes or []
@@ -92,6 +94,7 @@ class InterfaceFile(object):
                 flags, mode), 'w') as fp:
             interface = {
                 consts.NAME: self.name,
+                consts.IF_TYPE: self.if_type,
                 consts.ADDRESSES: self.addresses,
                 consts.ROUTES: self.routes,
                 consts.RULES: self.rules,
@@ -105,7 +108,7 @@ class InterfaceFile(object):
 class VIPInterfaceFile(InterfaceFile):
     def __init__(self, name, mtu, vips, vrrp_info, fixed_ips, topology):
 
-        super().__init__(name, mtu=mtu)
+        super().__init__(name, if_type=consts.VIP, mtu=mtu)
 
         has_ipv4 = any(vip['ip_version'] == 4 for vip in vips)
         has_ipv6 = any(vip['ip_version'] == 6 for vip in vips)
@@ -236,7 +239,7 @@ class VIPInterfaceFile(InterfaceFile):
 
 class PortInterfaceFile(InterfaceFile):
     def __init__(self, name, mtu, fixed_ips):
-        super().__init__(name, if_type=consts.BACKEND, mtu=mtu)
+        super().__init__(name, if_type=consts.BACKEND, mtu=mtu)
 
         if fixed_ips:
             ip_versions = set()
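The new required if_type field is persisted into the interface file and is what the `interface.if_type == consts.VIP` checks in interface.py above key off: only VIP ports manage policy rules, backend ports skip them. A rough standalone sketch of the idea (plain dicts, with the strings 'vip'/'backend' standing in for the octavia constants):

    import json
    import tempfile

    def should_update_rules(iface):
        # only the vip port updates the rules
        return iface.get('if_type') == 'vip'

    with tempfile.NamedTemporaryFile('w+', suffix='.json') as fp:
        # the agent persists if_type next to the other interface settings
        json.dump({'name': 'eth1', 'if_type': 'vip', 'mtu': 1500}, fp)
        fp.seek(0)
        print(should_update_rules(json.load(fp)))  # True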
octavia/amphorae/backends/utils/keepalivedlvs_query.py
CHANGED
@@ -94,13 +94,10 @@ def get_listener_realserver_mapping(ns_name, listener_ip_ports,
         if 'RemoteAddress:Port' in line:
             result_keys = re.split(r'\s+',
                                    LVS_KEY_REGEX.findall(line)[0].strip())
-        elif (find_target_block and
-              (line.startswith(constants.PROTOCOL_UDP) or
-               line.startswith(lib_consts.PROTOCOL_SCTP))):
-            break
-        elif re.match(r'^(UDP|SCTP)\s+%s\s+\w+' % idex,
-                      line):
-            find_target_block = True
+        elif (line.startswith(constants.PROTOCOL_UDP) or
+              line.startswith(lib_consts.PROTOCOL_SCTP)):
+            find_target_block = re.match(r'^(UDP|SCTP)\s+%s\s+\w+' % idex,
+                                         line) is not None
         elif find_target_block and line:
             rs_is_ipv4 = True
             all_values = V4_RS_VALUE_REGEX.findall(line)
@@ -136,7 +133,7 @@ def get_listener_realserver_mapping(ns_name, listener_ip_ports,
                         result_keys[index]] = result_values[index]
                 continue
 
-    return find_target_block, actual_member_result
+    return actual_member_result
@@ -267,7 +264,7 @@ def get_lvs_listener_pool_status(listener_id):
         cfg = f.read()
         hm_enabled = len(CHECKER_REGEX.findall(cfg)) > 0
 
-    _, realserver_result = get_listener_realserver_mapping(
+    realserver_result = get_listener_realserver_mapping(
         ns_name, resource_ipport_mapping['Listener']['ipports'],
         hm_enabled)
     pool_status = constants.UP
@@ -460,6 +457,8 @@ def get_lvs_listeners_stats():
                 status = constants.OPEN
             # Get scur
             for listener_ipport in listener_ipports:
+                if listener_ipport not in scur_res:
+                    continue
                 for m in scur_res[listener_ipport]['Members']:
                     for item in m:
                         if item[0] == 'ActiveConn':
octavia/amphorae/drivers/driver_base.py
CHANGED
@@ -151,8 +151,7 @@ class AmphoraLoadBalancerDriver(object, metaclass=abc.ABCMeta):
         """
 
     def post_vip_plug(self, amphora, load_balancer, amphorae_network_config,
-                      vrrp_port=None, vip_subnet=None,
-                      additional_vip_data=None):
+                      vrrp_port, vip_subnet, additional_vip_data=None):
         """Called after network driver has allocated and plugged the VIP
 
         :param amphora:
octavia/amphorae/drivers/haproxy/rest_api_driver.py
CHANGED
@@ -37,10 +37,9 @@ import octavia.common.jinja.haproxy.combined_listeners.jinja_cfg as jinja_combo
 from octavia.common.jinja.lvs import jinja_cfg as jinja_udp_cfg
 from octavia.common.tls_utils import cert_parser
 from octavia.common import utils
-from octavia.db import api as db_apis
+from octavia.db import api as db_api
 from octavia.db import models as db_models
 from octavia.db import repositories as repo
-from octavia.network import data_models as network_models
 
 
 LOG = logging.getLogger(__name__)
@@ -183,9 +182,10 @@ class HaproxyAmphoraLoadBalancerDriver(
                               '"%s". Skipping this listener.',
                               listener.id, str(e))
                 listener_repo = repo.ListenerRepository()
-                listener_repo.update(db_apis.get_session(), listener.id,
-                                     provisioning_status=consts.ERROR,
-                                     operating_status=consts.ERROR)
+                with db_api.session().begin() as session:
+                    listener_repo.update(session, listener.id,
+                                         provisioning_status=consts.ERROR,
+                                         operating_status=consts.ERROR)
 
         if has_tcp:
             if listeners_to_update:
@@ -358,26 +358,16 @@ class HaproxyAmphoraLoadBalancerDriver(
         return net_info
 
     def post_vip_plug(self, amphora, load_balancer, amphorae_network_config,
-                      vrrp_port=None, vip_subnet=None,
-                      additional_vip_data=None):
+                      vrrp_port, vip_subnet, additional_vip_data=None):
         if amphora.status != consts.DELETED:
             self._populate_amphora_api_version(amphora)
-            if vip_subnet is None:
-                vip_subnet = amphorae_network_config.get(amphora.id).vip_subnet
-            if vrrp_port is None:
-                port = amphorae_network_config.get(amphora.id).vrrp_port
-                mtu = port.network.mtu
-            else:
-                port = vrrp_port
-                mtu = port.network['mtu']
+            port = vrrp_port.to_dict(recurse=True)
+            mtu = port[consts.NETWORK][consts.MTU]
             LOG.debug("Post-VIP-Plugging with vrrp_ip %s vrrp_port %s",
-                      amphora.vrrp_ip, port.id)
+                      amphora.vrrp_ip, port[consts.ID])
             net_info = self._build_net_info(
-                port.to_dict(recurse=True), amphora.to_dict(),
+                port, amphora.to_dict(),
                 vip_subnet.to_dict(recurse=True), mtu)
-            if additional_vip_data is None:
-                additional_vip_data = amphorae_network_config.get(
-                    amphora.id).additional_vip_data
             for add_vip in additional_vip_data:
                 add_host_routes = [{'nexthop': hr.nexthop,
                                     'destination': hr.destination}
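With the amphorav1 fallbacks removed, post_vip_plug no longer accepts model-or-None arguments: vrrp_port and vip_subnet are required, and the port model is normalized to a plain dict once via to_dict(recurse=True). A simplified sketch of that recursive-dict access pattern (toy dataclasses, not the octavia network models):

    from dataclasses import dataclass, asdict

    @dataclass
    class Network:
        mtu: int

    @dataclass
    class Port:
        id: str
        network: Network

        def to_dict(self, recurse=False):
            # asdict() recurses into nested dataclasses, mirroring the
            # recurse=True flag on octavia's data models
            return asdict(self) if recurse else {'id': self.id}

    port = Port('port-1', Network(mtu=1500)).to_dict(recurse=True)
    print(port['network']['mtu'])  # 1500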
@@ -393,7 +383,7 @@ class HaproxyAmphoraLoadBalancerDriver(
             except exc.Conflict:
                 LOG.warning('VIP with MAC %(mac)s already exists on amphora, '
                             'skipping post_vip_plug',
-                            {'mac': port.mac_address})
+                            {'mac': port[consts.MAC_ADDRESS]})
 
     def post_network_plug(self, amphora, port, amphora_network_config):
         fixed_ips = []
@@ -410,10 +400,6 @@ class HaproxyAmphoraLoadBalancerDriver(
                      'fixed_ips': fixed_ips,
                      'mtu': port.network.mtu}
         if port.id == amphora.vrrp_port_id:
-            if isinstance(amphora_network_config,
-                          network_models.AmphoraNetworkConfig):
-                amphora_network_config = amphora_network_config.to_dict(
-                    recurse=True)
             # We have to special-case sharing the vrrp port and pass through
             # enough extra information to populate the whole VIP port
             net_info = self._build_net_info(
octavia/amphorae/drivers/health/heartbeat_udp.py
CHANGED
@@ -331,8 +331,9 @@ class UpdateHealthDb:
         session = db_api.get_session()
 
         # We need to see if all of the listeners are reporting in
-        db_lb = self.amphora_repo.get_lb_for_health_update(session,
-                                                           health['id'])
+        with session.begin():
+            db_lb = self.amphora_repo.get_lb_for_health_update(session,
+                                                               health['id'])
         ignore_listener_count = False
 
         if db_lb:
@@ -353,11 +354,13 @@ class UpdateHealthDb:
                     l for k, l in db_lb.get('listeners', {}).items()
                     if l['protocol'] == constants.PROTOCOL_UDP]
                 if udp_listeners:
-                    expected_listener_count = (
-                        self._update_listener_count_for_UDP(
-                            session, db_lb, expected_listener_count))
+                    with session.begin():
+                        expected_listener_count = (
+                            self._update_listener_count_for_UDP(
+                                session, db_lb, expected_listener_count))
         else:
-            amp = self.amphora_repo.get(session, id=health['id'])
+            with session.begin():
+                amp = self.amphora_repo.get(session, id=health['id'])
             # This is debug and not warning because this can happen under
             # normal deleting operations.
             LOG.debug('Received a health heartbeat from amphora %s with '
@@ -392,8 +395,6 @@ class UpdateHealthDb:
         # does not match the expected listener count
         if len(listeners) == expected_listener_count or ignore_listener_count:
 
-            lock_session = db_api.get_session(autocommit=False)
-
             # if we're running too far behind, warn and bail
             proc_delay = time.time() - health['recv_time']
             hb_interval = CONF.health_manager.heartbeat_interval
@@ -409,6 +410,9 @@ class UpdateHealthDb:
                           {'id': health['id'], 'delay': proc_delay})
                 return
 
+            lock_session = db_api.get_session()
+            lock_session.begin()
+
             # if the input amphora is healthy, we update its db info
             try:
                 self.amphora_health_repo.replace(
@@ -472,9 +476,10 @@ class UpdateHealthDb:
             try:
                 if (listener_status is not None and
                         listener_status != db_op_status):
-                    self._update_status(
-                        session, self.listener_repo, constants.LISTENER,
-                        listener_id, listener_status, db_op_status)
+                    with session.begin():
+                        self._update_status(
+                            session, self.listener_repo, constants.LISTENER,
+                            listener_id, listener_status, db_op_status)
             except sqlalchemy.orm.exc.NoResultFound:
                 LOG.error("Listener %s is not in DB", listener_id)
 
@@ -496,9 +501,11 @@ class UpdateHealthDb:
                 if db_pool_id in processed_pools:
                     continue
                 db_pool_dict = db_lb['pools'][db_pool_id]
-                lb_status = self._process_pool_status(
-                    session, db_pool_id, db_pool_dict, pools,
-                    lb_status, processed_pools, potential_offline_pools)
+                with session.begin():
+                    lb_status = self._process_pool_status(
+                        session, db_pool_id, db_pool_dict, pools,
+                        lb_status, processed_pools,
+                        potential_offline_pools)
 
         if health_msg_version >= 2:
             raw_pools = health['pools']
@@ -514,9 +521,10 @@ class UpdateHealthDb:
                 if db_pool_id in processed_pools:
                     continue
                 db_pool_dict = db_lb['pools'][db_pool_id]
-                lb_status = self._process_pool_status(
-                    session, db_pool_id, db_pool_dict, pools,
-                    lb_status, processed_pools, potential_offline_pools)
+                with session.begin():
+                    lb_status = self._process_pool_status(
+                        session, db_pool_id, db_pool_dict, pools,
+                        lb_status, processed_pools, potential_offline_pools)
 
         for pool_id, pool in potential_offline_pools.items():
             # Skip if we eventually found a status for this pool
@@ -525,19 +533,21 @@ class UpdateHealthDb:
             try:
                 # If the database doesn't already show the pool offline, update
                 if pool != constants.OFFLINE:
-                    self._update_status(
-                        session, self.pool_repo, constants.POOL,
-                        pool_id, constants.OFFLINE, pool)
+                    with session.begin():
+                        self._update_status(
+                            session, self.pool_repo, constants.POOL,
+                            pool_id, constants.OFFLINE, pool)
             except sqlalchemy.orm.exc.NoResultFound:
                 LOG.error("Pool %s is not in DB", pool_id)
 
         # Update the load balancer status last
         try:
             if lb_status != db_lb['operating_status']:
-                self._update_status(
-                    session, self.loadbalancer_repo,
-                    constants.LOADBALANCER, db_lb['id'], lb_status,
-                    db_lb[constants.OPERATING_STATUS])
+                with session.begin():
+                    self._update_status(
+                        session, self.loadbalancer_repo,
+                        constants.LOADBALANCER, db_lb['id'], lb_status,
+                        db_lb[constants.OPERATING_STATUS])
         except sqlalchemy.orm.exc.NoResultFound:
             LOG.error("Load balancer %s is not in DB", db_lb.id)
 
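All of the hunks above follow one recipe: the autocommit session (removed in SQLAlchemy 2.0) is replaced by explicit, short-lived transaction scopes around each repository call. A generic sketch of the pattern (in-memory SQLite, hypothetical table):

    from sqlalchemy import create_engine, text
    from sqlalchemy.orm import Session

    engine = create_engine('sqlite://')
    session = Session(engine)

    # instead of one long autocommit session, each unit of DB work gets
    # its own begin/commit block; on exception the block rolls back
    with session.begin():
        session.execute(text('CREATE TABLE amphora (id TEXT)'))
    with session.begin():
        session.execute(text("INSERT INTO amphora VALUES ('amp-1')"))

The one exception is lock_session.begin(), called without a `with` block, presumably because the surrounding update path commits or rolls that transaction back explicitly further down.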
octavia/amphorae/drivers/keepalived/jinja/jinja_cfg.py
CHANGED
@@ -60,9 +60,7 @@ class KeepalivedJinjaTemplater(object):
 
         :param loadbalancer: A loadbalancer object
         :param amphora: An amphora object
-        :param amp_net_config: The amphora network config,
-                               an AmphoraeNetworkConfig object in amphorav1,
-                               a dict in amphorav2
+        :param amp_net_config: The amphora network config, a dict
         """
         # Note on keepalived configuration: The current base configuration
         # enforced Master election whenever a high priority VRRP instance
@@ -74,15 +72,8 @@ class KeepalivedJinjaTemplater(object):
         peers_ips = []
 
         # Get the VIP subnet for the amphora
-        # amphorav2 passes a dict, amphorav1 an AmphoraeNetworkConfig object
-        if isinstance(amp_net_config, dict):
-            additional_vip_data = amp_net_config['additional_vip_data']
-            vip_subnet = amp_net_config[constants.VIP_SUBNET]
-        else:
-            additional_vip_data = [
-                add_vip.to_dict(recurse=True)
-                for add_vip in amp_net_config.additional_vip_data]
-            vip_subnet = amp_net_config.vip_subnet.to_dict()
+        additional_vip_data = amp_net_config['additional_vip_data']
+        vip_subnet = amp_net_config[constants.VIP_SUBNET]
 
         # Sort VIPs by their IP so we can guarantee interface_index matching
         sorted_add_vips = sorted(additional_vip_data,
octavia/amphorae/drivers/noop_driver/driver.py
CHANGED
@@ -93,8 +93,7 @@ class NoopManager(object):
                                                       'post_network_plug')
 
     def post_vip_plug(self, amphora, load_balancer, amphorae_network_config,
-                      vrrp_port=None, vip_subnet=None,
-                      additional_vip_data=None):
+                      vrrp_port, vip_subnet, additional_vip_data=None):
         LOG.debug("Amphora %s no-op, post vip plug load balancer %s",
                   self.__class__.__name__, load_balancer.id)
         self.amphoraconfig[(load_balancer.id, id(amphorae_network_config))] = (
@@ -167,12 +166,11 @@ class NoopAmphoraLoadBalancerDriver(
         self.driver.post_network_plug(amphora, port, amphora_network_config)
 
     def post_vip_plug(self, amphora, load_balancer, amphorae_network_config,
-                      vrrp_port=None, vip_subnet=None,
-                      additional_vip_data=None):
+                      vrrp_port, vip_subnet, additional_vip_data=None):
 
         self.driver.post_vip_plug(amphora,
                                   load_balancer, amphorae_network_config,
-                                  vrrp_port=vrrp_port, vip_subnet=vip_subnet,
+                                  vrrp_port, vip_subnet,
                                   additional_vip_data=additional_vip_data)
 
     def upload_cert_amp(self, amphora, pem_file):
octavia/api/common/pagination.py
CHANGED
@@ -359,16 +359,16 @@ class PaginationHelper(object):
                 default = PaginationHelper._get_default_column_value(
                     model_attr.property.columns[0].type)
                 attr = sa_sql.expression.case(
-                    [(model_attr.isnot(None), model_attr)],
-                    else_=default)
+                    (model_attr.isnot(None), model_attr),
+                    else_=default)
                 crit_attrs.append((attr == marker_values[j]))
 
             model_attr = getattr(model, self.sort_keys[i][0])
             default = PaginationHelper._get_default_column_value(
                 model_attr.property.columns[0].type)
             attr = sa_sql.expression.case(
-                [(model_attr.isnot(None), model_attr)],
-                else_=default)
+                (model_attr.isnot(None), model_attr),
+                else_=default)
             this_sort_dir = self.sort_keys[i][1]
             if this_sort_dir == constants.DESC:
                 if self.page_reverse == "True":
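This is the SQLAlchemy 2.0 form of case(): the 1.x API took a list of when-tuples, while 1.4+/2.0 takes the tuples positionally. In isolation (requires sqlalchemy>=1.4):

    from sqlalchemy import case, column

    col = column('updated_at')

    # SQLAlchemy 1.x (removed in 2.0):
    #   case([(col.isnot(None), col)], else_='n/a')
    expr = case((col.isnot(None), col), else_='n/a')
    print(expr)  # CASE WHEN updated_at IS NOT NULL THEN ... ELSE ... END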
octavia/api/drivers/amphora_driver/v2/driver.py
CHANGED
@@ -273,8 +273,10 @@ class AmphoraProviderDriver(driver_base.ProviderDriver):
     # Member
     def member_create(self, member):
         pool_id = member.pool_id
-        db_pool = self.repositories.pool.get(db_apis.get_session(),
-                                             id=pool_id)
+        session = db_apis.get_session()
+        with session.begin():
+            db_pool = self.repositories.pool.get(session,
+                                                 id=pool_id)
         self._validate_members(db_pool, [member])
 
         payload = {consts.MEMBER: member.to_dict()}
@@ -296,7 +298,9 @@ class AmphoraProviderDriver(driver_base.ProviderDriver):
 
     def member_batch_update(self, pool_id, members):
         # The DB should not have updated yet, so we can still use the pool
-        db_pool = self.repositories.pool.get(db_apis.get_session(), id=pool_id)
+        session = db_apis.get_session()
+        with session.begin():
+            db_pool = self.repositories.pool.get(session, id=pool_id)
 
         self._validate_members(db_pool, members)
 
@@ -385,8 +389,10 @@ class AmphoraProviderDriver(driver_base.ProviderDriver):
 
     # L7 Policy
     def l7policy_create(self, l7policy):
-        db_listener = self.repositories.listener.get(
-            db_apis.get_session(), id=l7policy.listener_id)
+        session = db_apis.get_session()
+        with session.begin():
+            db_listener = self.repositories.listener.get(
+                session, id=l7policy.listener_id)
         if db_listener.protocol not in VALID_L7POLICY_LISTENER_PROTOCOLS:
             msg = ('%s protocol listeners do not support L7 policies' % (
                 db_listener.protocol))